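"""Generate word segmentation training data from the UTS_Text_v1 corpus.

Each syllable is written as ``token<TAB>tag`` with BIO-style tags
(B-W for the first syllable of a word, I-W for the rest); sentences are
separated by blank lines. Output goes to data/train.txt next to this script.
"""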
from os.path import dirname, join

from datasets import load_dataset

from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
from underthesea.utils import logger

# Load the raw text corpus and take the training sentences
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
sentences = dataset["train"]["text"]

pwd = dirname(__file__)
data_file = join(pwd, "data/train.txt")

# Start with an empty output file, then append to it in chunks below
with open(data_file, "w") as f:
    f.write("")

# Vietnamese text, so write the training data as UTF-8
f = open(data_file, "a", encoding="utf-8")
content = ""
for j, s in enumerate(sentences):
    # Flush the buffer to disk every 100 sentences
    if j % 100 == 0 and j > 0:
        f.write(content)
        content = ""
        logger.info(j)
    # Segment the sentence into words, then split each word back into syllables
    words = word_tokenize(s)
    for word in words:
        tokens = tokenize(word)
        for i, token in enumerate(tokens):
            # BIO-style tags: first syllable of a word is B-W, the rest are I-W
            if i == 0:
                tag = "B-W"
            else:
                tag = "I-W"
            content += token + "\t" + tag + "\n"
    # Blank line between sentences
    content += "\n"

# Write whatever is left in the buffer and close the file
f.write(content)
content = ""
logger.info(j)
f.close()