# UTS_WTK_v1: Hugging Face `datasets` loading script.
import os
import datasets
# Short human-readable description shown on the dataset card.
_DESCRIPTION = """\
UTS_WTK_v1
"""
# BibTeX citation for the dataset (none provided yet).
_CITATION = """\
"""
# Base URL of the raw data files hosted on the Hugging Face Hub.
_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK_v1/raw/main/data/"
# File name of the training split, resolved relative to _BASE_URL.
TRAIN_FILE = "train.txt"
class UTS_WTK_v1(datasets.GeneratorBasedBuilder):
    """Word-tokenization dataset builder for UTS_WTK_v1.

    The data is a CoNLL-style text file: one ``token<TAB>tag`` pair per
    line, with sentences separated by blank lines. Each example is one
    sentence, given as parallel lists of tokens and tags.
    """

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # The generator yields Python lists per sentence, so the
                    # features must be sequences of strings, not scalar strings.
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # _BASE_URL is a URL: join with plain concatenation, not
        # os.path.join, which would insert '\' on Windows.
        train_file = dl_manager.download(_BASE_URL + TRAIN_FILE)
        # dev_file = dl_manager.download(_BASE_URL + "dev.txt")
        # test_file = dl_manager.download(_BASE_URL + "test.txt")
        data_files = {
            "train": train_file,
            # "dev": dev_file,
            # "test": test_file,
        }
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}
            ),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yield ``(guid, example)`` pairs, one per sentence.

        A sentence ends at a blank line; each non-blank line is a
        tab-separated ``token<TAB>tag`` pair.
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            # Accumulators must live OUTSIDE the line loop so tokens/tags
            # collect across lines until a sentence boundary is reached.
            tokens = []
            tags = []
            for line in f:
                if line.strip() == "":
                    # Blank line: emit the completed sentence, if any.
                    if tokens:
                        # Only keys declared in _info().features are yielded.
                        yield guid, {
                            "tokens": tokens,
                            "tags": tags,
                        }
                        guid += 1
                        tokens = []
                        tags = []
                else:
                    # Each line is tab separated: token<TAB>tag.
                    splits = line.strip().split("\t")
                    tokens.append(splits[0])
                    tags.append(splits[1])
            # Flush the final sentence when the file has no trailing blank line.
            if tokens:
                yield guid, {
                    "tokens": tokens,
                    "tags": tags,
                }