upload data
- data/{train/medium/c-00000-of-00001.parquet → test/java-00000-of-00001.parquet} +2 -2
- data/{train/small/c-00000-of-00001.parquet → test/javascript-00000-of-00001.parquet} +2 -2
- data/test/php-00000-of-00001.parquet +3 -0
- data/{train/medium → test}/python-00000-of-00001.parquet +2 -2
- data/train/full/php-00000-of-00013.parquet +3 -0
- data/train/full/php-00001-of-00013.parquet +3 -0
- data/train/full/php-00002-of-00013.parquet +3 -0
- data/train/full/php-00003-of-00013.parquet +3 -0
- data/train/full/php-00004-of-00013.parquet +3 -0
- data/train/full/php-00005-of-00013.parquet +3 -0
- data/train/full/php-00006-of-00013.parquet +3 -0
- data/train/full/php-00007-of-00013.parquet +3 -0
- data/train/full/php-00008-of-00013.parquet +3 -0
- data/train/full/php-00009-of-00013.parquet +3 -0
- data/train/full/php-00010-of-00013.parquet +3 -0
- data/train/full/php-00011-of-00013.parquet +3 -0
- data/train/full/php-00012-of-00013.parquet +3 -0
- data/train/small/java-00000-of-00002.parquet +3 -0
- data/train/small/java-00001-of-00002.parquet +3 -0
- data/train/small/javascript-00000-of-00001.parquet +3 -0
- data/train/small/php-00000-of-00001.parquet +3 -0
- data/train/small/python-00000-of-00002.parquet +3 -0
- data/train/small/python-00001-of-00002.parquet +3 -0
- data/validation/java-00000-of-00001.parquet +3 -0
- data/validation/javascript-00000-of-00001.parquet +3 -0
- data/validation/php-00000-of-00001.parquet +3 -0
- data/{train/small → validation}/python-00000-of-00001.parquet +2 -2
- the-vault-function.py +34 -44
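Taken together, the commit reshards the parquet data into per-language train/small, train/full, validation, and test directories and updates the loading script to expose them as configs. A minimal usage sketch, assuming the split_set/languages keyword arguments defined by TheVaultFunctionConfig in the script below (exact accepted values are defined there):

from datasets import load_dataset

# Sketch only: split_set and languages are forwarded to TheVaultFunctionConfig;
# "train/small" becomes the split name "train_small" in the generated dataset.
ds = load_dataset(
    "Fsoft-AIC/the-vault-function",
    split_set=["train/small"],
    languages=["python"],
)
print(ds)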
data/{train/medium/c-00000-of-00001.parquet → test/java-00000-of-00001.parquet}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ffc020b13de011dbda8342aa9193b6aa0b1bb5d78bb2d591b0525a8c858fad4d
+size 17359091
data/{train/small/c-00000-of-00001.parquet → test/javascript-00000-of-00001.parquet}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3cb0366a31f1c8556f41ed521e8563a571d72edeefb02908e1099cbad10f6ee0
+size 19391965
data/test/php-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f8a38c2d8cfab79940e161dc791f3238afd5042729960adc0ed06f736387277
+size 11838030
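Each of these parquet entries is a Git LFS pointer rather than the file itself: three lines giving the spec version, the SHA-256 digest (oid) of the actual blob, and its size in bytes. A small verification sketch (the helper name is ours, not part of this repo):

import hashlib

def matches_lfs_pointer(path, oid, size):
    # Hash the downloaded blob in 1 MiB chunks and compare against the pointer.
    h, n = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            n += len(chunk)
    return h.hexdigest() == oid and n == size

# e.g. for data/test/php-00000-of-00001.parquet:
# matches_lfs_pointer("php-00000-of-00001.parquet",
#                     "2f8a38c2d8cfab79940e161dc791f3238afd5042729960adc0ed06f736387277",
#                     11838030)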
data/{train/medium → test}/python-00000-of-00001.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:89821a73c22f28423fb00ea2350aa07063a85498d8e5e07c08addfc855dfc949
+size 23418686
data/train/full/php-00000-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad7ea6e25a2ebcff5cc65895b730d1f69afdc9871a55e2e136c8a54161ceb9c3
+size 255191961

data/train/full/php-00001-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c6ad08396360c3fb8a5c820f88c646e2c7aa89fcbf62323e867f4754caac47f
+size 252752452

data/train/full/php-00002-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dfd46a58eac4e4d594abe9db1b2c2d34fe5e43ed4fd7643682c7a5e0851e799
+size 255453507

data/train/full/php-00003-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b36d53b803a270a67d7d59e5822bcb221935da97d69f65b69d3dea144e51f124
+size 227648449

data/train/full/php-00004-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:817aac034aaeaea1dd9d69e6124e4835b81abeb451c562b156a6c31c68e1375f
+size 188634991

data/train/full/php-00005-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0268db0c262299abe771d7668fe18a474d5df034a36799913f5dccb488fb4c3
+size 275699298

data/train/full/php-00006-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bcba77ffe67e8168b7f91a890a3f298d7001dbadeb1726f1cc00dc29e4bd75f5
+size 289921466

data/train/full/php-00007-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4488bf62de60f74861f63bafc0efbf642d3e4ddb2e9f03aed7703052a92807d
+size 298037480

data/train/full/php-00008-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fa1184326fa13748fa6bc3ed7eb9e5c192fafc315fd490747e2a1e5a0ee84af
+size 305679087

data/train/full/php-00009-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad95abc934011199c9a104c3f4ac02e043239dd2d510ac4fb3e6d2948abebefd
+size 310964749

data/train/full/php-00010-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e55bfb7a35b8424a8cd9bdd33d0f03d57a1a8940196e8b497e6c786e4c3b35d
+size 316639499

data/train/full/php-00011-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c554a49c8dde61a8011dca2630ff2eff9d567c8c1041c795a543bba1c306615b
+size 321365614

data/train/full/php-00012-of-00013.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08704184fbb1d46783684fe0003ac0e69e57f4751fe0a0f6d557592bb68b9707
+size 323625193
data/train/small/java-00000-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76225daca59bc4df9c588137645a4be957799ed39a1aa5dc874c66700bc9323c
+size 144381146

data/train/small/java-00001-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6291fd1bff667592e70ff09b89f1edcbc9d65876f00cbe29d67118311dd822e6
+size 150063939

data/train/small/javascript-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:680cf7aecc88e78eb220a32a9d13200c1be292a3727c0919dafef0b691eca553
+size 83630027

data/train/small/php-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a329f2ef3c419ea03ff3fba238fa345968ff0bf4eed098905e0575b98020ba8c
+size 166427665

data/train/small/python-00000-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e67921a3235760bde1adea172dcbfbd5a5bf9e8610d968a60b67cb87d36f17e2
+size 215008362

data/train/small/python-00001-of-00002.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:479e4e398b93885b3f46f84f1968fbc19476494fdf9886cb4097899c1d96a95b
+size 213601878
data/validation/java-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d72a2b04f587866397331b8f3810fd671aee559d62a1acdecff28a7f22e1a1f
+size 15895431

data/validation/javascript-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a43668bb5d195992c98bcf6da939547cb3afabfea703bc5f6567077a21eec4bf
+size 23787608

data/validation/php-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51ce04636aa44d1f660d079a943582ced81f9d51fb04ee6be2764888b8f6f426
+size 13451582
data/{train/small → validation}/python-00000-of-00001.parquet
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:9ce9b534b166e36968015d73a4084a1edc93b4cb688f5284f3a8e3e330f973fd
+size 32660710
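The shard files follow a <language>-<index>-of-<count>.parquet convention, so a split/language pair plus its shard count (as tracked by num_shard_split in the script below) is enough to enumerate every path. A hypothetical sketch of that mapping:

def shard_paths(split, language, num_shards):
    # Reconstructs paths like data/train/full/php-00000-of-00013.parquet.
    return [f"data/{split}/{language}-{i:05d}-of-{num_shards:05d}.parquet"
            for i in range(num_shards)]

print(shard_paths("train/full", "php", 13)[:2])
# ['data/train/full/php-00000-of-00013.parquet', 'data/train/full/php-00001-of-00013.parquet']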
the-vault-function.py
CHANGED
@@ -9,14 +9,14 @@ _REPO_NAME = 'Fsoft-AIC/the-vault-function'
 _LANG_TO_TEXT = {
     "python": "python",
     "c": "c",
-
-
-
-
-
-
-
-
+    "c#": "c_sharp",
+    "c++": "cpp",
+    "go": "go",
+    "Java": "java",
+    "javascript": "javascript",
+    "php": "php",
+    "ruby": "ruby",
+    "rust": "rust",
 }


@@ -35,7 +35,7 @@ for lang in _LANG_TO_TEXT:



-_LANG_CONFIGS = ["all"] + list(
+_LANG_CONFIGS = ["all"] + list(_LANG_TO_TEXT.keys())

 num_shard_split = {
     'train/small/python': 1,
@@ -43,7 +43,7 @@ num_shard_split = {
     'train/small/c': 1,
     'train/medium/c': 1
 }
-_SPLIT_CONFIGS = ["all", "train", "train/small", "train/medium"]
+_SPLIT_CONFIGS = ["all", "train", "train/small", "train/medium", "train/full", "validation", "test"]

 class TheVaultFunctionConfig(datasets.BuilderConfig):
     """BuilderConfig for The Vault dataset."""
@@ -57,7 +57,7 @@ class TheVaultFunctionConfig(datasets.BuilderConfig):
         """
         super().__init__(
             *args,
-            name= "+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join(languages),
+            name= "+".join([split.replace("/", "_") for split in split_set]) + "-" + "+".join([_LANG_TO_TEXT[lang] for lang in languages]),
             **kwargs,
         )

@@ -66,28 +66,27 @@ class TheVaultFunctionConfig(datasets.BuilderConfig):

         assert all([language in _LANG_CONFIGS for language in languages]), f"languages {languages} contains language not in {_LANG_CONFIGS}."
         assert all([split in _SPLIT_CONFIGS for split in split_set]), f"split_set {split_set} contains element not in {_SPLIT_CONFIGS}."
-
+
         if "all" in split_set:
             assert len(split_set)==1, f"Passed 'all' together with other split sets. {split_set}"
-        elif "train" in split_set:
+        elif "train" in split_set or "train/full" in split_set:
             for split in split_set:
-                if "train" in split and split != "train":
-                    raise ValueError(f"Split set 'train' already contains '{split}'. Please only include one.")
+                if ("train" in split and split != "train") or ("train" in split and split != "train/full"):
+                    raise ValueError(f"Split set 'train' (or 'train/full) already contains '{split}'. Please only include one.")

         if "all" in languages:
             assert len(languages)==1, f"Passed 'all' together with other languages. {languages}"
-
-
-        # self.filter_languages = True
+        else:
+            languages = [_LANG_TO_TEXT[lang] for lang in languages] # Convert to text name

-        self.languages =
-        self.split_set= split_set
+        self.languages = list(languages)
+        self.split_set= list(split_set)


 class TheVaultFunction(datasets.GeneratorBasedBuilder):
     """The Vault dataset."""

-    VERSION = datasets.Version("1.0.
+    VERSION = datasets.Version("1.0.0")

     BUILDER_CONFIG_CLASS = TheVaultFunctionConfig
     BUILDER_CONFIGS = [TheVaultFunctionConfig(languages=[lang], split_set=[spl]) for lang in _LANG_CONFIGS for spl in _SPLIT_CONFIGS]
@@ -98,6 +97,7 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features({
+                "hexsha": datasets.Value("string"),
                 "repo": datasets.Value("string"),
                 "path": datasets.Value("string"),
                 "license": datasets.Sequence(datasets.Value("string")),
@@ -141,7 +141,7 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
                         "type": datasets.Value("string"),
                         "docstring": datasets.Value("string"),
                         "docstring_tokens": datasets.Sequence(datasets.Value("string")),
-                        "default": datasets.Value("
+                        "default": datasets.Value("string"),
                         "is_optional": datasets.Value("bool")
                     }
                 ],
@@ -151,7 +151,7 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
                         "type": datasets.Value("string"),
                         "docstring": datasets.Value("string"),
                         "docstring_tokens": datasets.Sequence(datasets.Value("string")),
-                        "default": datasets.Value("
+                        "default": datasets.Value("string"),
                         "is_optional": datasets.Value("bool")
                     }
                 ],
@@ -172,22 +172,18 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         generators = []
-        split_set = list(self.config.split_set)
-        languages = list(self.config.languages)

         if "all" in split_set:
-            split_set =
+            split_set = ["train/full", "validation", "test"]

-        load_full_train = False
         if "train" in split_set:
             split_set.remove('train')
-            split_set.extend(["train/
-            load_full_train = True
+            split_set.extend(["train/full"])

         if "all" in languages:
             languages = _LANG_CONFIGS[1:]

-        train_split_files = []
+        # train_split_files = []
         for split in split_set:
             split_files = []
             for language in languages:
@@ -199,43 +195,37 @@ class TheVaultFunction(datasets.GeneratorBasedBuilder):
                 files = dl_manager.download(data_files)
                 split_files.extend(files)

-            if load_full_train and "train" in split:
-
-            else:
+            # if load_full_train and "train" in split:
+            #     train_split_files.extend(split_files)
+            # else:
+
             generators.append(
                 datasets.SplitGenerator(
-                    name=split.replace("/", "_"),
+                    name="train" if split == "train/full" else split.replace("/", "_"),
                     gen_kwargs={
                         "files": split_files,
                     },
                 ),
             )

-        if load_full_train and train_split_files:
-
+        # if load_full_train and train_split_files:
+        #     generators = [datasets.SplitGenerator(name="train", gen_kwargs={"files": train_split_files})] + generators


         return generators

     def _generate_examples(self, files):
-        print(files)
         key = 0
         for file_idx, file in enumerate(files):
             with open(file, "rb") as f:
                 parquet_file = pq.ParquetFile(f)
                 for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
                     pa_table = pa.Table.from_batches([record_batch])
-                    print(pa_table)
                     for row_index in range(pa_table.num_rows):
                         row = pa_table.slice(row_index, 1).to_pydict()
-                        print(row.keys())
-                        print(row['comment'])
-                        # lang = row['language'][0]
-
-                        # if self.config.filter_languages and not lang in self.config.languages:
-                        #     continue

                         yield key, {
+                            "hexsha": row['hexsha'][0],
                             "repo": row['repo'][0],
                             "path": row['path'][0],
                             "license": row['license'][0],