# letras-carnaval-cadiz / letras-carnaval-cadiz-old-script.py
import json
import tempfile
from typing import List

import datasets
from datasets import (
    BuilderConfig,
    DownloadManager,
    GeneratorBasedBuilder,
    Sequence,
    SplitGenerator,
    Value,
)

_CITATION = """\
@misc{letrascarnavalcadiz2023,
    author = {Romero Reyna, Iván and Franco Medinilla, Jesús Federico and Avecilla de la Herrán, Jesús Carlos},
    title = {letras-carnaval-cadiz},
    year = {2023},
    url = {https://huggingface.co/datasets/IES-Rafael-Alberti/letras-carnaval-cadiz}
}
"""
_DESCRIPTION = """\
This dataset is a comprehensive collection of lyrics from the Carnaval de Cádiz, a significant cultural heritage of the city of Cádiz, Spain. Despite its cultural importance, there has been a lack of a structured database for these lyrics, hindering research and public access to this cultural heritage. This dataset aims to address this gap.
The dataset was created by the Cádiz AI Learning Community, a branch of the non-profit association Spain AI, and was developed by Iván Romero Reyna and Jesús Federico Franco Medinilla, students of the Specialization Course in Artificial Intelligence and Big Data at IES Rafael Alberti during the 2022-2023 academic year. The project is supervised by Jesús Carlos Avecilla de la Herrán, a computational linguist.
Collaboration is encouraged, with individuals able to verify the different records of the dataset at letrascarnavalcadiz.com, ensuring the transcription of the lyrics and all data are correct. New lyrics can also be added to the dataset. Corrections and additions are not immediately reflected in the dataset but are updated periodically.
For more information or to report a problem, you can write to contacto@letrascarnavalcadiz.com.
"""
# Mappings from the integer codes used in the raw JSON files to human-readable labels.
song_type_mapping = {
    1: "presentación",
    2: "pasodoble/tango",
    3: "cuplé",
    4: "estribillo",
    5: "popurrí",
    6: "cuarteta",
}

group_type_mapping = {
    1: "coro",
    2: "comparsa",
    3: "chirigota",
    4: "cuarteto",
}
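
# Illustrative note (not used by the builder itself): an integer code from the raw
# JSON is translated with dict.get, falling back to "indefinido" for unknown codes,
# exactly as done in _generate_examples below. For example:
#     song_type_mapping.get(2, "indefinido")    # -> "pasodoble/tango"
#     group_type_mapping.get(99, "indefinido")  # -> "indefinido"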

class CadizCarnivalConfig(BuilderConfig):
    """BuilderConfig for the letras-carnaval-cadiz dataset."""

    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.2"), **kwargs)

class CadizCarnivalDataset(GeneratorBasedBuilder):
    """Lyrics from the Carnaval de Cádiz, provided as accurate and midaccurate records."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        CadizCarnivalConfig(name="accurate", description="Configuration with only the accurate records"),
        CadizCarnivalConfig(name="midaccurate", description="Configuration with only the midaccurate records"),
        CadizCarnivalConfig(name="all", description="Configuration with both the accurate and the midaccurate records"),
    ]

    DEFAULT_CONFIG_NAME = "all"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": Value("string"),
                "authors": Sequence(Value("string")),
                "song_type": Value("string"),
                "year": Value("string"),
                "group": Value("string"),
                "group_type": Value("string"),
                "lyrics": Sequence(Value("string")),
            }),
            supervised_keys=None,
            homepage="https://letrascarnavalcadiz.com/",
            citation=_CITATION,
        )
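
    # Illustrative sketch of one example as yielded by _generate_examples; the
    # values are invented placeholders and only the schema matches the Features
    # declared above:
    #     ("some-id", {
    #         "id": "some-id",
    #         "authors": ["Autor Uno", "Autor Dos"],
    #         "song_type": "pasodoble/tango",
    #         "year": "1998",
    #         "group": "Nombre de la agrupación",
    #         "group_type": "comparsa",
    #         "lyrics": ["Primera línea", "Segunda línea"],
    #     })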
    def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
        urls_to_download = {
            "accurate": "https://huggingface.co/datasets/IES-Rafael-Alberti/letras-carnaval-cadiz/raw/main/data/accurate-00000-of-00001.json",
            "midaccurate": "https://huggingface.co/datasets/IES-Rafael-Alberti/letras-carnaval-cadiz/raw/main/data/midaccurate-00000-of-00001.json",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        if self.config.name == "accurate":
            return [SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["accurate"]})]
        elif self.config.name == "midaccurate":
            return [SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["midaccurate"]})]
        else:  # Default is "all": load both JSON files and combine them.
            with open(downloaded_files["accurate"], "r", encoding="utf-8") as f:
                data_accurate = json.load(f)
            with open(downloaded_files["midaccurate"], "r", encoding="utf-8") as f:
                data_midaccurate = json.load(f)
            data_all = data_accurate + data_midaccurate

            # Write the combined data to a temporary file; it is written and later
            # read back as UTF-8 so non-ASCII characters in the lyrics survive.
            with tempfile.NamedTemporaryFile(delete=False, mode="w", encoding="utf-8", suffix=".json") as temp_file:
                json.dump(data_all, temp_file, ensure_ascii=False)
                temp_filepath = temp_file.name
            return [SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": temp_filepath})]
    def _generate_examples(self, filepath: str):
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for item in data:
            # Replace the integer codes with their labels; unknown codes fall back to "indefinido".
            item["song_type"] = song_type_mapping.get(item["song_type"], "indefinido")
            item["group_type"] = group_type_mapping.get(item["group_type"], "indefinido")
            yield item["id"], item
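

# Usage sketch (illustrative, not part of the loading script): assuming this script is
# published on the Hub as "IES-Rafael-Alberti/letras-carnaval-cadiz" (the repository the
# data URLs above point at), the three configurations could be loaded like this:
#
#     from datasets import load_dataset
#
#     # Only the "accurate" records.
#     accurate = load_dataset("IES-Rafael-Alberti/letras-carnaval-cadiz", "accurate", split="train")
#
#     # Both accurate and midaccurate records (the default "all" configuration).
#     everything = load_dataset("IES-Rafael-Alberti/letras-carnaval-cadiz", "all", split="train")
#
#     print(everything[0]["group_type"], everything[0]["song_type"])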