"""Prepare word segmentation training data from the UTS_Text_v1 dataset.

Each sentence is written in CoNLL-style format: one syllable per line with a
B-W/I-W tag separated by a tab, and a blank line between sentences. The
sentences are split into train (index < 8000), dev (8000-8999) and test
(>= 9000) files.
"""
from os.path import dirname, join

from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
from underthesea.utils import logger

dataset = load_dataset("undertheseanlp/UTS_Text_v1")
sentences = dataset["train"]["text"]

pwd = dirname(__file__)
train_file = join(pwd, "data/train.txt")
dev_file = join(pwd, "data/dev.txt")
test_file = join(pwd, "data/test.txt")

# Truncate the output files before appending to them
for file in [train_file, dev_file, test_file]:
    with open(file, "w") as f:
        f.write("")

f1 = open(train_file, "a")
f2 = open(dev_file, "a")
f3 = open(test_file, "a")

f = f1
content = ""
for j, s in enumerate(sentences):
    # Every 100 sentences, flush the accumulated block to the current file,
    # then switch the output file at the split boundaries:
    # train (< 8000), dev (8000-8999), test (>= 9000).
    if j % 100 == 0 and j > 0:
        f.write(content)
        content = ""
        logger.info(j)
        if 8000 <= j < 9000:
            f = f2
        elif j >= 9000:
            f = f3
    # word_tokenize groups syllables into words; the regex tokenizer splits
    # each word back into syllables so they can be tagged B-W/I-W.
    words = word_tokenize(s)
    for word in words:
        tokens = tokenize(word)
        for i, token in enumerate(tokens):
            tag = "B-W" if i == 0 else "I-W"
            content += token + "\t" + tag + "\n"
    content += "\n"

# Flush whatever remains after the last full block
f.write(content)
content = ""
logger.info(j)

f1.close()
f2.close()
f3.close()