# UTS_WTK/generate_dataset.py
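"""Generate a CoNLL-style word-tokenization dataset from UTS_Text_v1.

Each sentence is word-tokenized with underthesea, then written one
syllable per line as `token<TAB>tag`, where the first syllable of a
word is tagged B-W and any following syllables I-W. Sentences are
separated by blank lines and split 8,000/1,000/rest into the
train/dev/test files under data/.
"""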
from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
from os.path import dirname, join
from underthesea.utils import logger
# load the raw Vietnamese sentences from the UTS_Text_v1 dataset
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
sentences = dataset["train"]["text"]

# output files live in data/ next to this script
pwd = dirname(__file__)
train_file = join(pwd, "data/train.txt")
dev_file = join(pwd, "data/dev.txt")
test_file = join(pwd, "data/test.txt")
# truncate any existing output files
for file in [train_file, dev_file, test_file]:
    with open(file, "w", encoding="utf-8") as f:
        f.write("")
# stream sentences into the splits: the first 8,000 go to train,
# the next 1,000 to dev, and the remainder to test
f = open(train_file, "a", encoding="utf-8")
content = ""
for j, s in enumerate(sentences):
    if j == 8000:
        f.close()
        f = open(dev_file, "a", encoding="utf-8")
    if j == 9000:
        f.close()
        f = open(test_file, "a", encoding="utf-8")
    # word_tokenize groups syllables into words; tokenize splits each
    # word back into syllable-level tokens for B-W/I-W tagging
    words = word_tokenize(s)
    for word in words:
        tokens = tokenize(word)
        for i, token in enumerate(tokens):
            tag = "B-W" if i == 0 else "I-W"
            content += token + "\t" + tag + "\n"
    # blank line separates sentences in the output
    content += "\n"
    # flush the buffer to disk every 1,000 sentences
    if j % 1000 == 999:
        f.write(content)
        content = ""
        logger.info(j)
# flush any remaining buffered sentences before closing
if content:
    f.write(content)
f.close()