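"""Create word-tokenized (wtk) training data from the UTS_Text corpora.

Each split is written in a two-column CoNLL-style format: one syllable per
line, tagged B-W for the first syllable of a word and I-W for the rest,
with a blank line between sentences. (Docstring inferred from the code below.)
"""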
import os
import shutil
from os.path import dirname, join

from datasets import load_dataset
from underthesea import word_tokenize

def create_wtk_dataset(text_dataset, output_folder):
    # Start from a clean output folder so stale files never mix with new ones.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    os.makedirs(output_folder)
    for split in ["train", "validation", "test"]:
        sentences = text_dataset[split]["text"]
        # Spell out utf-8 so Vietnamese text survives on any platform default.
        with open(join(output_folder, f"{split}.txt"), "w", encoding="utf-8") as f:
            for sentence in sentences:
                # word_tokenize returns a list of words; a multi-syllable
                # word comes back as space-joined syllables.
                items = word_tokenize(sentence)
                for item in items:
                    tokens = item.split()
                    for i, token in enumerate(tokens):
                        # First syllable of a word is B-W, the rest are I-W.
                        tag = "B-W" if i == 0 else "I-W"
                        f.write(f"{token}\t{tag}\n")
                # A blank line separates sentences.
                f.write("\n")
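
# For illustration, a sentence like "Chào mừng bạn" would be written
# (tab-separated) as the block below, assuming word_tokenize groups
# "Chào mừng" into a single word:
#
#     Chào    B-W
#     mừng    I-W
#     bạn     B-W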

if __name__ == "__main__":
    pwd = dirname(__file__)
    data_folder = join(pwd, "data")
    # Build one dataset folder per UTS_Text configuration.
    for size in ["small", "base", "large"]:
        text_dataset = load_dataset("undertheseanlp/UTS_Text", size)
        create_wtk_dataset(text_dataset, join(data_folder, size))
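
# Resulting layout, next to this script (derived from the calls above):
#   data/small/{train,validation,test}.txt
#   data/base/{train,validation,test}.txt
#   data/large/{train,validation,test}.txt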