|
"""Ottoman Literary Dataset from late 19th century up to early 20th century.""" |
|
|
|
import json |
|
import os |
|
import warnings |
|
from typing import Dict, List, Tuple |
|
|
|
import datasets |
|
import numpy as np |
|
from transformers import PreTrainedTokenizerBase |
|
|
|
_DESCRIPTION = """Second level categorization of Ottoman articles.""" |
|
|
|
_HOMEPAGE = "" |
|
|
|
_LICENSE = "" |
|
|
|
_NAMES = [ |
|
"cultural_discourse_subject", |
|
"cultural_discourse_type", |
|
"literary_text_type", |
|
] |
|
|
|
|
|
_LABELS: Dict[str, Tuple[int, bool]] = { |
|
"cultural_discourse_subject": (8, True), |
|
"cultural_discourse_type": (7, True), |
|
"literary_text_type": (6, False), |
|
} |
|
|
|
|
|
def generate_urls(name: str) -> Dict[str, str]: |
|
return { |
|
"train": os.path.join(name, "train.json"), |
|
"val": os.path.join(name, "val.json"), |
|
"test": os.path.join(name, "test.json"), |
|
} |
|
|
|
|
|
class NonwestlitSecondLevelConfig(datasets.BuilderConfig): |
|
"""BuilderConfig for Dataset.""" |
|
|
|
def __init__( |
|
self, tokenizer: PreTrainedTokenizerBase = None, max_sequence_length: int = None, **kwargs |
|
): |
|
"""BuilderConfig for Dataset. |
|
|
|
Args: |
|
**kwargs: keyword arguments forwarded to super. |
|
""" |
|
super(NonwestlitSecondLevelConfig, self).__init__(**kwargs) |
|
self.tokenizer = tokenizer |
|
self.max_sequence_length = max_sequence_length |
|
|
|
@property |
|
def features(self): |
|
if self.name == "literary_text_type": |
|
labels = datasets.Value("uint8") |
|
else: |
|
labels = datasets.Sequence(datasets.Value("uint8")) |
|
return { |
|
"labels": labels, |
|
"input_ids": datasets.Value("string"), |
|
"title": datasets.Value("string"), |
|
"iid": datasets.Value("uint32"), |
|
"chunk_id": datasets.Value("uint32"), |
|
} |
|
|
|
|
|
class NonwestlitSecondLevelDataset(datasets.GeneratorBasedBuilder): |
|
VERSION = datasets.Version("0.0.1") |
|
|
|
BUILDER_CONFIGS = [ |
|
NonwestlitSecondLevelConfig(name=name, version=version, description=name) |
|
for name, version in zip(_NAMES, [VERSION] * len(_NAMES)) |
|
] |
|
BUILDER_CONFIG_CLASS = NonwestlitSecondLevelConfig |
|
__current_id = 1 |
|
__current_chunk_id = 1 |
|
|
|
@property |
|
def __next_id(self): |
|
cid = self.__current_id |
|
self.__current_id += 1 |
|
return cid |
|
|
|
@property |
|
def __next_chunk_id(self): |
|
cid = self.__current_chunk_id |
|
self.__current_chunk_id += 1 |
|
return cid |
|
|
|
@property |
|
def label_info(self) -> Tuple[int, bool]: |
|
return _LABELS[self.config.name] |
|
|
|
def __reset_chunk_id(self): |
|
self.__current_chunk_id = 1 |
|
|
|
def _info(self): |
|
if self.config.tokenizer is None: |
|
raise RuntimeError( |
|
"For HF Datasets and for chunking to be carried out, 'tokenizer' must be given." |
|
) |
|
if "llama" in self.config.tokenizer.name_or_path: |
|
warnings.warn( |
|
"It is suggested to pass 'max_sequence_length' argument for Llama-2 model family. There " |
|
"might be errors for the data processing parts as `model_max_len` attributes are set to" |
|
"MAX_INT64 (?)." |
|
) |
|
return datasets.DatasetInfo( |
|
description=_DESCRIPTION, |
|
features=datasets.Features(self.config.features), |
|
) |
|
|
|
def _split_generators(self, dl_manager): |
|
urls = generate_urls(self.config.name) |
|
data_dir = dl_manager.download_and_extract(urls) |
|
return [ |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]} |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["val"]} |
|
), |
|
datasets.SplitGenerator( |
|
name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]} |
|
), |
|
] |
|
|
|
def prepare_articles(self, article: str) -> List[str]: |
|
tokenizer = self.config.tokenizer |
|
model_inputs = tokenizer( |
|
article, |
|
truncation=True, |
|
padding=True, |
|
max_length=self.config.max_sequence_length, |
|
return_overflowing_tokens=True, |
|
) |
|
return tokenizer.batch_decode(model_inputs["input_ids"], skip_special_tokens=True) |
|
|
|
def _to_one_hot(self, labels: List[int], num_labels: int) -> List[int]: |
|
x = np.zeros(num_labels, dtype=np.float16) |
|
x[labels] = 1.0 |
|
return x.tolist() |
|
|
|
|
|
def _generate_examples(self, filepath: str): |
|
with open(filepath, encoding="utf-8") as f: |
|
dataset = json.load(f) |
|
|
|
num_labels, multi_label = self.label_info |
|
chunk_id = 0 |
|
for instance in dataset: |
|
iid = instance.get("id", self.__next_id) |
|
label = instance.get("label") |
|
if label is None: |
|
if not multi_label: |
|
continue |
|
else: |
|
label = self._to_one_hot(labels=[], num_labels=num_labels) |
|
elif isinstance(label, int): |
|
label = int(label) - 1 |
|
elif isinstance(label, str): |
|
if multi_label: |
|
label = [int(l) - 1 for l in label.split(",")] |
|
label = self._to_one_hot(label, num_labels) |
|
else: |
|
label = int(label) - 1 |
|
|
|
article = self.prepare_articles(instance["article"]) |
|
self.__reset_chunk_id() |
|
for chunk in article: |
|
chunk_inputs = { |
|
"iid": iid, |
|
"chunk_id": self.__next_chunk_id, |
|
"title": instance["title"], |
|
"input_ids": chunk, |
|
"labels": label, |
|
} |
|
yield chunk_id, chunk_inputs |
|
chunk_id += 1 |
|
|