# Datasets:
# License:
"""The Indonesian Wiki Loader"""
import os
import re

import datasets
import pandas as pd
_CITATIONS = """\ | |
@ONLINE{wikidump, | |
author = "Wikimedia Foundation", | |
title = "Wikimedia Downloads", | |
url = "https://dumps.wikimedia.org"} | |
@ONLINE{wikipedia-hf, | |
title = "Huggingface Wikipedia Dataset", | |
url = "https://huggingface.co/datasets/wikipedia"}""" | |
_REPO_URL = "https://huggingface.co/datasets/sabilmakbar/indo_wiki" | |
_LICENSE = ( | |
"This work is licensed under the Creative Commons Attribution-ShareAlike " | |
"3.0 Unported License. To view a copy of this license, visit " | |
"http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to " | |
"Creative Commons, PO Box 1866, Mountain View, CA 94042, USA." | |
) | |
_INDO_WIKI_RAW_DESCRIPTION = """\ | |
Indonesian Wikipedia Data Repository contains Wikipedia Data from Wikipedia HF that focuses | |
on extraction in Indonesian Languange and Indonesian Local Languages, that some of them | |
are considered as low-resource languages or extremely low-resource languages""" | |
_INDO_WIKI_DEDUP_DESCRIPTION = """\ | |
This is a derivative of Indonesian Wikipedia Data Repository which is already pre-processed | |
by identifying and dropping duplicates to prevent boilerplate texts occuring in dataset""" | |
_AVAILABLE_DUMP_VERSION_DATE = ["20230901"] | |
_AVAILABLE_DUMP_LANGUAGES = ["ace", "ban", "bjn", "bug", "gor", "id", "jv", "map-bms", "min", "ms", "nia", "su", "tet"] | |
def _construct_dataset_from_dset_version_and_lang(date_ver: str, lang: str, mode: str): | |
_mode_to_folder_mapper = {"dedup": "indo_wiki_dedup_data", "raw": "indo_wiki_raw_data"} | |
_mode_to_file_suffix_mapper = {"dedup": "dataset_soft_hard_cleansed.csv", "raw": "raw_dataset.csv"} | |
return os.path.join(_mode_to_folder_mapper[mode], f"wiki_{lang}_{date_ver}_{_mode_to_file_suffix_mapper[mode]}") | |
class IndoWikiConfig(datasets.BuilderConfig):
    """BuilderConfig for IndoWiki."""

    def __init__(self, description: str = None, features: list = None,
                 data_url: str = None, date_stamp: str = None, lang: str = None,
                 mode: str = "dedup", **kwargs):
        """BuilderConfig for IndoWiki.

        Args:
            description: `string`, description of dataset; when omitted it is
                auto-constructed from `mode` and the resolved data path.
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label" if it's a supervised.
                Defaults to ['url', 'title', 'text'].
            data_url: `string`, url to download the data.
            date_stamp: `string`, wikidump date_stamp for data available in repo.
            lang: `string`, language to be loaded.
            mode: `string`, either "dedup" or "raw".
            **kwargs: keyword arguments forwarded to super.

        Raises:
            ValueError: On an unknown `mode`, when neither `data_url` nor the
                `date_stamp`/`lang` pair is provided, or when `date_stamp` or
                `lang` is outside the published dumps.
        """
        # validate configs
        if mode not in ("dedup", "raw"):
            raise ValueError(f"Error occured! Expected values are 'dedup' or 'raw' for arg `mode`, received {mode}!")
        if (lang is None or date_stamp is None) and data_url is None:
            raise ValueError("Expected `data_url` is provided or both `date_stamp` and `lang` are provided!")
        _mode_to_desc_mapper = {"dedup": _INDO_WIKI_DEDUP_DESCRIPTION, "raw": _INDO_WIKI_RAW_DESCRIPTION}
        if date_stamp is not None and date_stamp not in _AVAILABLE_DUMP_VERSION_DATE:
            raise ValueError("Provided `date_stamp` dataset versioning doesn't match! Please re-check")
        if lang is not None and lang not in _AVAILABLE_DUMP_LANGUAGES:
            raise ValueError("Provided `lang` doesn't match! Please re-check")

        super().__init__(**kwargs)

        # NOTE(fix): default is None, not a shared mutable list literal.
        self.features = features if features is not None else ["url", "title", "text"]

        # prioritize kwargs data_url
        if data_url is not None:
            self.data_url = data_url
        else:
            self.data_url = _construct_dataset_from_dset_version_and_lang(date_ver=date_stamp, lang=lang, mode=mode)

        # auto-construct desc if not provided
        if description is None:
            self.description = _mode_to_desc_mapper[mode] + "\n" + f"From file path {self.data_url}"
        else:
            # NOTE(fix): the original dropped a caller-supplied description.
            self.description = description

        # define citations & info URL internally in config class
        self.citation = _CITATIONS
        self.url = _REPO_URL
class IndoWiki(datasets.GeneratorBasedBuilder):
    """The IndoWiki Dataset."""

    # if name isn't provided, will create a dataset of all languages
    DEFAULT_CONFIG_NAME = "indowiki_dedup_all"
    BUILDER_CONFIG_CLASS = IndoWikiConfig

    # Per-mode file paths of the newest available dump for every language.
    _newest_data_raw_all_langs = [_construct_dataset_from_dset_version_and_lang(
        date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="raw") for lang in _AVAILABLE_DUMP_LANGUAGES]
    _newest_data_dedup_all_langs = [_construct_dataset_from_dset_version_and_lang(
        date_ver=sorted(_AVAILABLE_DUMP_VERSION_DATE)[-1], lang=lang, mode="dedup") for lang in _AVAILABLE_DUMP_LANGUAGES]

    BUILDER_CONFIGS = [
        IndoWikiConfig(
            name="indowiki_all",
            description=_INDO_WIKI_RAW_DESCRIPTION,
            data_url=_newest_data_raw_all_langs
        ),
        IndoWikiConfig(
            name="indowiki_dedup_all",
            description=_INDO_WIKI_DEDUP_DESCRIPTION,
            data_url=_newest_data_dedup_all_langs
        ),
        IndoWikiConfig(
            name="indowiki_dedup_id_only",
            lang="id",
            date_stamp="20230901"
        )
    ]

    def _info(self):
        """Build the `DatasetInfo`; every configured feature is a string column."""
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
            license=_LICENSE)

    @staticmethod
    def _get_lang_name_from_data_url(data_url: str):
        """Extract the language code from a data file path.

        The code sits between the "wiki_" prefix and the 8-digit date stamp
        in the file name; characters outside [A-Za-z0-9_.] become "_".
        """
        # NOTE(fix): declared @staticmethod — the original def had no `self`
        # yet was called as `self._get_lang_name_from_data_url(file)`, which
        # passed two arguments to a one-argument function (TypeError).
        # lang code occurred after "wiki_" and before date versioning (using 8len date)
        _list_folder_sep = data_url.split("/")[-1].split("_")
        # index of the first underscore-separated chunk holding an 8-digit date
        _min_pos = min(pos for pos, data in enumerate(_list_folder_sep) if re.search(r"\d{8}", data))
        return re.sub(r"[^\w\.]", "_", "_".join(_list_folder_sep[1:_min_pos]))

    def _split_generators(self, dl_manager):
        """Download the CSV files and map them to splits.

        The "*_all" configs get one split per language; any other config
        yields a single TRAIN split.
        """
        if self.config.name in ("indowiki_all", "indowiki_dedup_all"):
            file_dict = {self._get_lang_name_from_data_url(file): file for file in self.config.data_url}
            dl_dir = dl_manager.download_and_extract(file_dict)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split(split_name),
                    gen_kwargs={
                        "data_file": file_name
                    }
                )
                # dl_dir is a dictionary containing lang or split as keyname and file path as value
                for split_name, file_name in dl_dir.items()]
        else:
            dl_dir = dl_manager.download_and_extract(self.config.data_url)
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "data_file": dl_dir
                    },
                )
            ]

    def _generate_examples(self, data_file):
        """Yield (id, example) pairs from one CSV, keyed by its "id" column."""
        pd_df = pd.read_csv(data_file)
        for _, row in pd_df.iterrows():
            example = {feature: row[feature] for feature in self.config.features}
            idx = row["id"]
            yield idx, example