# UTS_WTK/UTS_WTK_v1.py
import os
import datasets

_DESCRIPTION = """\
UTS_WTK_v1: word tokenization dataset from the undertheseanlp project.
Examples are sentences given as parallel lists of tokens and tags, read from
tab-separated text files.
"""
_CITATION = """\
"""
_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK_v1/raw/main/data/"
TRAIN_FILE = "train.txt"


class UTS_WTK_v1(datasets.GeneratorBasedBuilder):
    """Builder for UTS_WTK_v1: one tab-separated token/tag pair per line,
    with blank lines separating sentences."""

def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Value("string"),
"tags": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=None,
citation=_CITATION,
)

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
train_file = dl_manager.download(os.path.join(_BASE_URL, TRAIN_FILE))
# dev_file = dl_manager.download(os.path.join(DATA_PATH, "dev.txt"))
# test_file = dl_manager.download(os.path.join(DATA_PATH, "test.txt"))
data_files = {
"train": train_file,
# "dev": dev_file,
# "test": test_file,
}
splits = [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}
),
# datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
# datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
]
return splits

    def _generate_examples(self, filepath):
        """Yields sentences from a file with one tab-separated token/tag pair
        per line and blank lines separating sentences."""
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            tags = []
            for line in f:
                if line == "" or line == "\n":
                    # Blank line: the current sentence is complete.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "tags": tags,
                        }
                        guid += 1
                        tokens = []
                        tags = []
                else:
                    # Each line is tab separated: token<TAB>tag.
                    splits = line.strip().split("\t")
                    tokens.append(splits[0])
                    tags.append(splits[1])
            # Yield the final sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "tags": tags,
                }
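

# Usage sketch (not part of the original script, illustration only): with a
# `datasets` release that still supports script-based loading, this builder can
# be exercised from a local copy of the file. The script path below is an
# assumption; adjust it to wherever this file is saved.
if __name__ == "__main__":
    # Build only the TRAIN split defined in _split_generators above.
    ds = datasets.load_dataset("UTS_WTK_v1.py", split="train")

    # Each example is one sentence: parallel lists of tokens and tags.
    example = ds[0]
    print(example["id"], example["tokens"], example["tags"])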