Dataset: ljvmiranda921/tlunified-ner
Tasks: Token Classification
Sub-tasks: named-entity-recognition
Modalities: Text
Formats: parquet
Languages: Tagalog
Size: 1K - 10K
License: GNU GPL v3.0
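A quick usage sketch, assuming the loading script below is published at ljvmiranda921/tlunified-ner (recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Builds the train/validation/test splits via the loading script below.
ds = load_dataset("ljvmiranda921/tlunified-ner")
print(ds["train"][0])  # {"id": "0", "tokens": [...], "ner_tags": [...]}
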
from typing import List
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """
This dataset contains the annotated TLUnified corpora from Cruz and Cheng
(2021). It is a curated sample of around 7,000 documents for the named
entity recognition (NER) task. Most of the corpus consists of news reports
in Tagalog, resembling the domain of the original CoNLL 2003 dataset. There
are three entity types: Person (PER), Organization (ORG), and Location (LOC).
"""
_LICENSE = """GNU GPL v3.0"""
_URL = "https://huggingface.co/ljvmiranda921/tlunified-ner"
_CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
_VERSION = "1.0.0"
class TLUnifiedNERConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(TLUnifiedNERConfig, self).__init__(**kwargs)
class TLUnifiedNER(datasets.GeneratorBasedBuilder):
    """Contains an annotated version of the TLUnified dataset from Cruz and Cheng (2021)."""

    VERSION = datasets.Version(_VERSION)

    def _info(self) -> "datasets.DatasetInfo":
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=_CLASSES)
                    ),
                }
            ),
            homepage=_URL,
            license=_LICENSE,
            supervised_keys=None,
        )
    def _split_generators(
        self, dl_manager: "datasets.DownloadManager"
    ) -> List["datasets.SplitGenerator"]:
        """Return a list of SplitGenerators that organizes the splits."""
        # The download extracts into {train,dev,test}.iob files. The
        # _generate_examples function below defines how these files are parsed.
        data_files = {
            "train": dl_manager.download_and_extract("corpus/iob/train.iob"),
            "dev": dl_manager.download_and_extract("corpus/iob/dev.iob"),
            "test": dl_manager.download_and_extract("corpus/iob/test.iob"),
        }
        return [
            # fmt: off
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
            # fmt: on
        ]
    def _generate_examples(self, filepath: str):
        """Defines how examples are parsed from the IOB file."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Document boundary: emit the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # TLUnified-NER IOB files are tab-separated: "token\ttag".
                    token, ner_tag = line.split("\t")
                    tokens.append(token)
                    ner_tags.append(ner_tag.rstrip())
            # Last example
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
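Because ner_tags are stored as ClassLabel ids, converting them back to tag strings takes one extra step. A minimal sketch, assuming the dataset was loaded as in the snippet at the top:

labels = ds["train"].features["ner_tags"].feature  # the underlying ClassLabel
example = ds["train"][0]
for token, tag_id in zip(example["tokens"], example["ner_tags"]):
    print(f"{token}\t{labels.int2str(tag_id)}")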