import os

import datasets

_DESCRIPTION = """\
UTS_WTK_v1
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK_v1/raw/main/data/"
TRAIN_FILE = "train.txt"


class UTS_WTK_v1(datasets.GeneratorBasedBuilder):
    """Word tokenization dataset UTS_WTK_v1."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Value("string"),
                    "tags": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        train_file = dl_manager.download(os.path.join(_BASE_URL, TRAIN_FILE))
        # dev_file = dl_manager.download(os.path.join(_BASE_URL, "dev.txt"))
        # test_file = dl_manager.download(os.path.join(_BASE_URL, "test.txt"))
        data_files = {
            "train": train_file,
            # "dev": dev_file,
            # "test": test_file,
        }
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}
            ),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples, one example per non-empty line."""
        with open(filepath, encoding="utf-8") as f:
            for guid, line in enumerate(f):
                line = line.rstrip("\n")
                if not line:
                    continue
                # The yielded keys must match the features declared in _info()
                # ("tokens" and "tags"). Each line is assumed to hold a tokens
                # column and a tags column separated by a tab; if a line has no
                # tab, tags is left empty.
                tokens, _, tags = line.partition("\t")
                yield guid, {"tokens": tokens, "tags": tags}
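

if __name__ == "__main__":
    # Quick usage sketch (an illustration, not part of the builder itself):
    # load this script by its Hub id and print the first training example.
    # Recent versions of the `datasets` library may require passing
    # trust_remote_code=True for script-based datasets like this one.
    dataset = datasets.load_dataset("undertheseanlp/UTS_WTK_v1")
    print(dataset["train"][0])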