the-vault-function / the-vault.py
import os

import pyarrow as pa
import pyarrow.parquet as pq

import datasets

_REPO_NAME = 'Fsoft-AIC/the-vault'
_LANG_TO_EXTENSION = {
    "Python": [".py"],
    "C": [".c", ".h"],
    "C#": [".cs"],
    "C++": [".cpp", ".hpp", ".c++", ".h++", ".cc", ".hh", ".C", ".H"],
    "Go": [".go"],
    "Java": [".java"],
    "JavaScript": [".js"],
    "PHP": [".php", ".php3", ".php4", ".php5", ".phps", ".phpt"],
    "Ruby": [".rb"],
    "Rust": [".rs"],
}
_DESCRIPTION = """The Vault"""

_HOMEPAGE = "https://huggingface.co/Fsoft-AIC"

# Invert the language -> extensions map so a file extension can be resolved
# back to its language.
_EXTENSION_TO_LANG = {}
for lang in _LANG_TO_EXTENSION:
    for extension in _LANG_TO_EXTENSION[lang]:
        _EXTENSION_TO_LANG[extension] = lang

_LANG_CONFIGS = ["all"] + list(_LANG_TO_EXTENSION.keys())
num_shard_split = {
    'train/small': 2,
    'train/medium': 4,
}

splits = ["all"] + list(num_shard_split.keys())
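# Derived values, for reference:
#   _EXTENSION_TO_LANG[".cc"] == "C++", _EXTENSION_TO_LANG[".rb"] == "Ruby"
#   splits == ["all", "train/small", "train/medium"]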
class TheVaultConfig(datasets.BuilderConfig):
    """BuilderConfig for The Vault dataset."""

    def __init__(self, *args, languages=["all"], split_set="all", **kwargs):
        """BuilderConfig for The Vault dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            split_set (:obj:`str`): "all" or one of the keys of num_shard_split.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            name=split_set.replace("/", "_") + "_" + "+".join(languages),
            **kwargs,
        )

        languages = set(languages)

        assert all([language in _LANG_CONFIGS for language in languages]), f"Language not in {_LANG_CONFIGS}."
        assert split_set in splits, "split_set {} not in {}.".format(split_set, splits)

        if "all" in languages:
            assert len(languages) == 1, f"Passed 'all' together with other languages. {languages}"
            self.filter_languages = False
        else:
            self.filter_languages = True

        self.languages = set(languages)
        self.split_set = split_set
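# For illustration: TheVaultConfig(languages=["Python"], split_set="train/small")
# produces the config name "train_small_Python" with filter_languages=True, while
# the default TheVaultConfig() produces "all_all" with filter_languages=False.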
class TheVault(datasets.GeneratorBasedBuilder):
    """The Vault dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = TheVaultConfig
    BUILDER_CONFIGS = [TheVaultConfig(languages=[lang], split_set=spl) for lang in _LANG_CONFIGS for spl in splits]
    # Config names follow the split_set + languages scheme above, so the
    # default is "all_all" (the original "all-all" matched no config).
    DEFAULT_CONFIG_NAME = "all_all"
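    # A minimal usage sketch (an assumption, not part of this file: the Hub
    # repo named in _REPO_NAME serves this script, and extra keyword arguments
    # to load_dataset are forwarded to TheVaultConfig):
    #
    #     import datasets
    #     ds = datasets.load_dataset(_REPO_NAME, split_set="train/small", languages=["Python"])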
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "original_string": datasets.Value("string"),
                    "original_docstring": datasets.Value("string"),
                    "code": datasets.Value("string"),
                    "docstring": datasets.Value("string"),
                    "code_tokens": datasets.Value("string"),
                    "docstring_tokens": datasets.Value("string"),
                    "short_docstring": datasets.Value("string"),
                    "comment": datasets.Value("string"),
                    "return_type": datasets.Value("string"),
                    "identifier": datasets.Value("string"),
                    "repo": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "language": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )
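    # Every feature in this schema is a plain string, so a yielded example
    # looks roughly like the following (the field values are hypothetical):
    #
    #     {"identifier": "parse_args", "language": "Python",
    #      "repo": "octo/tools", "path": "tools/cli.py", ...}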
    def _split_generators(self, dl_manager):
        if self.config.split_set == "all":
            # Download every split set and emit one SplitGenerator per set.
            generators = []
            for split in num_shard_split:
                num_shards = num_shard_split[split]
                data_files = [
                    f"data/{split}-{_index:05d}-of-{num_shards:05d}.parquet"
                    for _index in range(num_shards)
                ]
                files = dl_manager.download(data_files)
                generators.append(
                    datasets.SplitGenerator(
                        name=split.replace("/", "_"),
                        gen_kwargs={
                            "files": files,
                        },
                    ),
                )
            return generators
        else:
            num_shards = num_shard_split[self.config.split_set]
            data_files = [
                f"data/{self.config.split_set}-{_index:05d}-of-{num_shards:05d}.parquet"
                for _index in range(num_shards)
            ]
            files = dl_manager.download(data_files)
            return [
                datasets.SplitGenerator(
                    name=self.config.split_set.replace("/", "_"),
                    gen_kwargs={
                        "files": files,
                    },
                ),
            ]
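    # For illustration, 'train/small' has 2 shards, so the files requested are
    #     data/train/small-00000-of-00002.parquet
    #     data/train/small-00001-of-00002.parquet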
    def _generate_examples(self, files):
        key = 0
        for file_idx, file in enumerate(files):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row_index in range(pa_table.num_rows):
                        # to_pydict() on a 1-row slice maps each column name
                        # to a one-element list.
                        row = pa_table.slice(row_index, 1).to_pydict()
                        lang = row["language"][0]
                        if self.config.filter_languages and lang not in self.config.languages:
                            continue
                        yield key, {
                            "original_string": row["original_string"][0],
                            "original_docstring": row["original_docstring"][0],
                            "code": row["code"][0],
                            "docstring": row["docstring"][0],
                            "code_tokens": row["code_tokens"][0],
                            "docstring_tokens": row["docstring_tokens"][0],
                            "short_docstring": row["short_docstring"][0],
                            "comment": row["comment"][0],
                            "return_type": row["return_type"][0],
                            "identifier": row["identifier"][0],
                            "repo": row["repo"][0],
                            "path": row["path"][0],
                            "language": lang,
                        }
                        key += 1
def lang_from_name(name):
    """Return the language for a file name based on its extension, or None."""
    for extension in _EXTENSION_TO_LANG:
        if name.endswith(extension):
            return _EXTENSION_TO_LANG[extension]
    return None
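# For illustration: lang_from_name("src/vec.cc") returns "C++", while a name
# with no known extension (e.g. "README.md") returns None.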