import os

import datasets

_DESCRIPTION = """\
UTS_WTK
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK/raw/main/data/"
TRAIN_FILE = "train.txt"
DEV_FILE = "dev.txt"
TEST_FILE = "test.txt"


class UTSWTK(datasets.GeneratorBasedBuilder):
    """UTS_WTK word-tokenization dataset builder.

    Loads tab-separated token/tag files (one token per line, blank line
    between sentences) and exposes them as token-classification examples
    with B-W/I-W tags.
    """

    def _info(self):
        """Return the dataset metadata: features, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=["B-W", "I-W"])
                    ),
                }
            ),
            supervised_keys=None,
            homepage=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        URLs are built by string concatenation rather than os.path.join:
        os.path.join would insert a backslash on Windows and corrupt the URL
        (_BASE_URL already ends with "/").
        """
        train_file = dl_manager.download(_BASE_URL + TRAIN_FILE)
        dev_file = dl_manager.download(_BASE_URL + DEV_FILE)
        test_file = dl_manager.download(_BASE_URL + TEST_FILE)
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dev_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_file}
            ),
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a tab-separated tagged file.

        Sentences are separated by blank lines; each non-blank line is
        "token<TAB>tag". Assumes every data line contains a tab — a
        malformed line raises IndexError (TODO confirm the data guarantees
        this).
        """
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            tags = []
            for line in f:
                if line == "" or line == "\n":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "tags": tags,
                        }
                        guid += 1
                        tokens = []
                        tags = []
                else:
                    # each line is tab separated
                    splits = line.strip().split("\t")
                    tokens.append(splits[0])
                    tags.append(splits[1])
            # Emit the trailing sentence: without this, a file that does not
            # end with a blank line silently loses its last example.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "tags": tags,
                }