File size: 952 Bytes
54cc554
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a68c521
 
 
 
54cc554
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
from os.path import dirname, join
from underthesea.utils import logger

# Export the UTS_Text_v1 corpus as CoNLL-style training data for word
# segmentation: one "token<TAB>tag" pair per line (B-W for the first token
# of a word, I-W for continuations), sentences separated by a blank line.
dataset = load_dataset("undertheseanlp/UTS_Text_v1")

sentences = dataset["train"]["text"]
pwd = dirname(__file__)
data_file = join(pwd, "data/train.txt")

# Single managed write handle (the original opened the file twice — once in
# "w" mode only to truncate, then again in "a" without ever closing on error).
# encoding="utf-8" is required: the corpus is Vietnamese text and the
# platform-default codec may not cover it.
with open(data_file, "w", encoding="utf-8") as f:
    buffer = []  # accumulate lines; join once per flush (avoids quadratic +=)
    j = -1  # sentinel so the final log is safe even for an empty corpus
    for j, sentence in enumerate(sentences):
        # Flush every 100 sentences to bound memory and report progress.
        if j % 100 == 0 and j > 0:
            f.write("".join(buffer))
            buffer = []
            logger.info(j)
        for word in word_tokenize(sentence):
            for i, token in enumerate(tokenize(word)):
                tag = "B-W" if i == 0 else "I-W"
                buffer.append(token + "\t" + tag + "\n")
        buffer.append("\n")  # blank line terminates the sentence
    # Write whatever remains after the last full batch.
    f.write("".join(buffer))
    if j >= 0:
        logger.info(j)