"""CommitPackFT"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{muennighoff2023octopack,
  title={OctoPack: Instruction Tuning Code Large Language Models},
  author={Niklas Muennighoff and Qian Liu and Armel Zebaze and Qinkai Zheng and Binyuan Hui and Terry Yue Zhuo and Swayam Singh and Xiangru Tang and Leandro von Werra and Shayne Longpre},
  journal={arXiv preprint arXiv:2308.07124},
  year={2023}
}
"""
_DESCRIPTION = """\
CommitPackFT is a 2GB filtered version of CommitPack that contains only high-quality commit messages resembling natural language instructions.
"""
_LANG = ['abap', 'actionscript', 'ada', 'agda', 'antlr', 'apacheconf', 'api-blueprint', 'apl', 'applescript', 'arc', 'arduino', 'asciidoc', 'asp', 'aspectj', 'assembly', 'ats', 'augeas', 'autohotkey', 'awk', 'batchfile', 'bitbake', 'blitzmax', 'bluespec', 'boo', 'brainfuck', 'bro', 'c', 'c#', 'c++', 'c2hs-haskell', 'capn-proto', 'cartocss', 'ceylon', 'chapel', 'clean', 'clojure', 'cmake', 'coffeescript', 'coldfusion', 'coldfusion-cfc', 'common-lisp', 'creole', 'crystal', 'csound', 'css', 'csv', 'cucumber', 'cuda', 'cython', 'dart', 'desktop', 'diff', 'digital-command-language', 'dm', 'dns-zone', 'dockerfile', 'dylan', 'eagle', 'ecl', 'edn', 'eiffel', 'elixir', 'elm', 'emacs-lisp', 'emberscript', 'erlang', 'f#', 'factor', 'fancy', 'fish', 'flux', 'forth', 'fortran', 'freemarker', 'g-code', 'gas', 'gdscript', 'genshi', 'gentoo-ebuild', 'gettext-catalog', 'glsl', 'gnuplot', 'go', 'graphql', 'graphviz-dot', 'groff', 'groovy', 'groovy-server-pages', 'haml', 'handlebars', 'harbour', 'haskell', 'haxe', 'hcl', 'hlsl', 'html', 'html+django', 'html+eex', 'html+erb', 'html+php', 'http', 'hy', 'idris', 'igor-pro', 'inform-7', 'ini', 'inno-setup', 'io', 'ioke', 'isabelle', 'jade', 'jasmin', 'java', 'java-server-pages', 'javascript', 'jflex', 'json', 'json5', 'jsoniq', 'jsonld', 'jsx', 'julia', 'jupyter-notebook', 'kotlin', 'krl', 'latte', 'lean', 'less', 'lfe', 'lilypond', 'linker-script', 'liquid', 'literate-agda', 'literate-coffeescript', 'literate-haskell', 'livescript', 'llvm', 'logos', 'logtalk', 'lsl', 'lua', 'm4', 'makefile', 'mako', 'maple', 'markdown', 'mask', 'mathematica', 'mediawiki', 'metal', 'mirah', 'modelica', 'module-management-system', 'monkey', 'moonscript', 'mtml', 'mupad', 'nesc', 'netlinx', 'nginx', 'nimrod', 'ninja', 'nit', 'nix', 'nsis', 'nu', 'objective-c++', 'ocaml', 'ooc', 'opencl', 'openscad', 'org', 'oz', 'pan', 'parrot-assembly', 'parrot-internal-representation', 'pascal', 'pawn', 'perl', 'perl6', 'php', 'piglatin', 'pike', 'pod', 'pony', 'postscript', 'pov-ray-sdl', 'powershell', 'processing', 'propeller-spin', 'protocol-buffer', 'pure-data', 'purebasic', 'purescript', 'python', 'qmake', 'qml', 'r', 'racket', 'ragel-in-ruby-host', 'raml', 'rdoc', 'rebol', 'red', 'renpy', 'restructuredtext', 'rhtml', 'robotframework', 'rouge', 'ruby', 'rust', 'sage', 'saltstack', 'sas', 'sass', 'scala', 'scaml', 'scheme', 'scilab', 'scss', 'shell', 'slash', 'slim', 'smalltalk', 'smarty', 'smt', 'solidity', 'sourcepawn', 'sparql', 'sqf', 'sql', 'squirrel', 'standard-ml', 'stata', 'ston', 'stylus', 'supercollider', 'svg', 'swift', 'systemverilog', 'tcl', 'tcsh', 'tex', 'text', 'textile', 'thrift', 'toml', 'turtle', 'twig', 'typescript', 'unity3d-asset', 'unknown', 'uno', 'unrealscript', 'urweb', 'vala', 'vcl', 'vhdl', 'viml', 'visual-basic', 'volt', 'vue', 'webidl', 'wisp', 'xbase', 'xml', 'xpages', 'xproc', 'xquery', 'xs', 'xslt', 'xtend', 'yacc', 'yaml', 'yang', 'zephir', 'zig']
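# "#" would start a URL fragment, so the C# and F# directory names are
# percent-encoded as "%23" when building the download URL below.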
_LANG_TO_URL_ESCAPE = {
    "c#": "c%23",
    "f#": "f%23",
}
_URL = "https://huggingface.co/datasets/bigcode/commitpackft/resolve/main/data/{lang}/data.jsonl"
_VERSION = datasets.Version("1.0.0", "")

class CommitPackFT(datasets.GeneratorBasedBuilder):
    """One builder config per programming language in CommitPackFT."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"CommitPackFT {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "commit": datasets.Value("string"),
                    "old_file": datasets.Value("string"),
                    "new_file": datasets.Value("string"),
                    "old_contents": datasets.Value("string"),
                    "new_contents": datasets.Value("string"),
                    "subject": datasets.Value("string"),
                    "message": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "license": datasets.Value("string"),
                    "repos": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the JSONL file for the selected language config, escaping "#" for C#/F#.
        downloaded_files = dl_manager.download(
            _URL.format(lang=_LANG_TO_URL_ESCAPE.get(self.config.name, self.config.name))
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                yield id_, {
                    "commit": data["commit"],
                    "old_file": data["old_file"],
                    "new_file": data["new_file"],
                    "old_contents": data["old_contents"],
                    "new_contents": data["new_contents"],
                    "subject": data["subject"],
                    "message": data["message"],
                    "lang": data["lang"],
                    "license": data["license"],
                    "repos": data["repos"],
                }
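

# Illustrative usage sketch (not part of the loading script itself; assumes the
# data files are reachable under the bigcode/commitpackft dataset repo):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("bigcode/commitpackft", "python", split="train")
#     print(ds[0]["message"])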