# tlunified-ner/tlunified-ner.py
from typing import List
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """
This dataset contains the annotated TLUnified corpus from Cruz and Cheng
(2021). It consists of a curated sample of around 7,000 documents for the
named entity recognition (NER) task. The majority of the corpus consists of
news reports in Tagalog, resembling the domain of the original CoNLL 2003
dataset. There are three entity types: Person (PER), Organization (ORG), and
Location (LOC).
"""
_LICENSE = """GNU GPL v3.0"""
_URL = "https://huggingface.co/ljvmiranda921/tlunified-ner"
_CLASSES = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
_VERSION = "1.0.0"
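
# The ClassLabel feature defined in _info() maps each tag to its index in
# _CLASSES, so "O" -> 0, "B-PER" -> 1, ..., "I-LOC" -> 6. A quick sanity
# check (illustrative only, not part of the loading script):
#
#   label = datasets.features.ClassLabel(names=_CLASSES)
#   assert label.str2int("B-PER") == 1
#   assert label.int2str(6) == "I-LOC"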
class TLUnifiedNERConfig(datasets.BuilderConfig):
    """BuilderConfig for the TLUnified-NER dataset."""

    def __init__(self, **kwargs):
        super(TLUnifiedNERConfig, self).__init__(**kwargs)
class TLUnifiedNER(datasets.GeneratorBasedBuilder):
"""Contains an annotated version of the TLUnified dataset from Cruz and Cheng (2021)."""
VERSION = datasets.Version(_VERSION)
def _info(self) -> "datasets.DatasetInfo":
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(names=_CLASSES)
),
}
),
homepage=_URL,
supervised_keys=None,
)
def _split_generators(
self, dl_manager: "datasets.builder.DownloadManager"
) -> List["datasets.SplitGenerator"]:
"""Return a list of SplitGenerators that organizes the splits."""
        # The dataset repository provides {train,dev,test}.iob files. The
        # _generate_examples function below defines how these files are parsed.
data_files = {
"train": dl_manager.download_and_extract("corpus/iob/train.iob"),
"dev": dl_manager.download_and_extract("corpus/iob/dev.iob"),
"test": dl_manager.download_and_extract("corpus/iob/test.iob"),
}
return [
# fmt: off
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
# fmt: on
]
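
    # A minimal sketch of the IOB layout that _generate_examples expects: one
    # tab-separated token/tag pair per line, with blank lines (or -DOCSTART-
    # markers) delimiting documents. The tokens below are illustrative Tagalog,
    # not drawn from the corpus:
    #
    #   Pumunta<TAB>O
    #   si<TAB>O
    #   Juan<TAB>B-PER
    #   sa<TAB>O
    #   Maynila<TAB>B-LOC
    #   <blank line, next document follows>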
def _generate_examples(self, filepath: str):
"""Defines how examples are parsed from the IOB file."""
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
ner_tags = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
guid += 1
tokens = []
ner_tags = []
else:
                    # Each line in the TLUnified-NER IOB files is a tab-separated token/tag pair.
token, ner_tag = line.split("\t")
tokens.append(token)
ner_tags.append(ner_tag.rstrip())
# Last example
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"ner_tags": ner_tags,
}
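

# A minimal usage sketch (an assumption, not part of the loading script):
# load the dataset from the Hub repository referenced in _URL and decode the
# integer tags back into their string names.
if __name__ == "__main__":
    dataset = datasets.load_dataset("ljvmiranda921/tlunified-ner")
    example = dataset["train"][0]
    label_names = dataset["train"].features["ner_tags"].feature.names
    tags = [label_names[i] for i in example["ner_tags"]]
    print(list(zip(example["tokens"], tags)))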