# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stanzas"""

import csv

import datasets


_CITATION = """\
@InProceedings{--,
  author    = {---},
  title     = {---},
  booktitle = {---},
  year      = 2021,
  address   = "---"
}
"""

_DESCRIPTION = """\
Stanzas
"""

_HOMEPAGE = "https://github.com/versae/bibles/"

STANZAS_BASE_URI = "https://huggingface.co/datasets/linhd-postdata/stanzas/resolve/main"
STANZAS = {
    "validation": f"{STANZAS_BASE_URI}/eval.csv",
    "test": f"{STANZAS_BASE_URI}/test.csv",
    "train": f"{STANZAS_BASE_URI}/train.csv",
}


class StanzasConfig(datasets.BuilderConfig):
    """BuilderConfig for Stanzas."""

    def __init__(self, **kwargs):
        """BuilderConfig for Stanzas.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(StanzasConfig, self).__init__(**kwargs)


class Stanzas(datasets.GeneratorBasedBuilder):
    """Stanzas"""

    BUILDER_CONFIGS = [
        StanzasConfig(name="default", version=datasets.Version("1.0.0"), description="Stanzas"),
    ]

    def _info(self):
        labels = [
            'ovillejo', 'romance', 'octava_real', 'couplet', 'octava', 'cuarteta',
            'copla_real', 'serventesio', 'haiku', 'cuaderna_vía', 'tercetillo',
            'cantar', 'sextilla', 'espinela', 'lira', 'octavilla', 'chamberga',
            'endecha_real', 'romance_arte_mayor', 'redondilla', 'septilla',
            'silva_arromanzada', 'seguidilla', 'cuarteto_lira', 'cuarteto',
            'décima_antigua', 'seguidilla_gitana', 'seguidilla_compuesta',
            'copla_castellana', 'quintilla', 'soleá', 'estrofa_manriqueña',
            'quinteto', 'terceto', 'sexta_rima', 'unknown', 'estrofa_sáfica',
            'estrofa_francisco_de_la_torre', 'novena', 'sexteto', 'copla_arte_menor',
            'copla_arte_mayor', 'terceto_monorrimo', 'copla_mixta', 'septeto',
            'sexteto_lira',
        ]
        self.labels = labels
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=labels),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the three CSV splits defined in STANZAS.
        downloaded_files = dl_manager.download(STANZAS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            next(csv_reader)  # skip the header row
            for idx, (text, label) in enumerate(csv_reader):
                yield idx, {
                    "text": text,
                    "label": label,
                }
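

# A minimal usage sketch, not part of the original script: it assumes this file is
# run directly (e.g. ``python stanzas.py``), that the split URLs above are reachable,
# and that the installed ``datasets`` version still supports loading local dataset
# scripts via ``load_dataset``. The ``__main__`` guard keeps it from running on import.
if __name__ == "__main__":
    # Load the dataset from this local script with the "default" config.
    dataset = datasets.load_dataset(__file__, name="default")
    print(dataset)
    # Inspect a few of the stanza class names declared in _info().
    print(dataset["train"].features["label"].names[:5])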