# UTS_WTK/UTS_WTK.py: Hugging Face `datasets` loading script
import datasets


_DESCRIPTION = """\
UTS_WTK: Vietnamese word tokenization dataset from the undertheseanlp project.
Each example is a sequence of tokens tagged with B-W / I-W word-boundary labels.
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK/raw/main/data/"
TRAIN_FILE = "train.txt"
DEV_FILE = "validation.txt"
TEST_FILE = "test.txt"
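
# Each configuration ("small"; a "large" variant is stubbed out below) keeps its
# splits as plain-text files under data/<config_name>/ at _BASE_URL, e.g.
# data/small/train.txt, data/small/validation.txt, data/small/test.txt.
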
class UTSWTKConfig(datasets.BuilderConfig):
    """BuilderConfig for the UTS_WTK dataset."""

    def __init__(self, **kwargs):
        super(UTSWTKConfig, self).__init__(**kwargs)


class UTSWTK(datasets.GeneratorBasedBuilder):
    """UTS Word Tokenize dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        UTSWTKConfig(
            name="small", version=VERSION, description="UTS_WTK Small"),
        # UTSWTKConfig(
        #     name="large", version=VERSION, description="UTS_WTK Large"),
    ]

    BUILDER_CONFIG_CLASS = UTSWTKConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=["B-W", "I-W"])
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/undertheseanlp/UTS_WTK",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        subset_folder = self.config.name
        # Build URLs with "/" explicitly; os.path.join would insert "\" on Windows
        # and break the remote paths.
        train_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TRAIN_FILE}")
        dev_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{DEV_FILE}")
        test_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TEST_FILE}")
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dev_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_file},
            ),
        ]
        return splits
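
    # Data files are CoNLL-style: one "token<TAB>tag" pair per line, with a blank
    # line separating sentences. Tags are B-W / I-W (begin / inside a word).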
    def _generate_examples(self, filepath):
        """Yields (key, example) pairs parsed from a tab-separated split file."""
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            tags = []
            for line in f:
                if line.strip() == "":
                    # A blank line ends the current sentence.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "tags": tags,
                        }
                        guid += 1
                        tokens = []
                        tags = []
                else:
                    # Each non-blank line is tab separated: token, then tag.
                    splits = line.strip().split("\t")
                    tokens.append(splits[0])
                    tags.append(splits[1])
            # Emit the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "tags": tags,
                }
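

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself: load the
    # "small" configuration through this file. Assumes the data files under
    # _BASE_URL are reachable; newer versions of `datasets` may additionally
    # require trust_remote_code=True when loading a local script.
    from datasets import load_dataset

    dataset = load_dataset(__file__, name="small")
    print(dataset)
    print(dataset["train"][0])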