from datasets import load_dataset
from underthesea import word_tokenize
from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize

# Load the UTS_Text_v1 corpus from the Hugging Face Hub
dataset = load_dataset("undertheseanlp/UTS_Text_v1")

# Raw sentences from the training split
sentences = dataset["train"]["text"]
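
# Minimal usage sketch (not part of the original file): the imports above
# bring in two tokenizers that the script never calls, so the loop below is
# an assumed illustration of how they might be applied to the loaded corpus.
for sentence in sentences[:3]:
    # Regex pass: splits raw text into syllables and punctuation,
    # without Vietnamese word segmentation
    print(tokenize(sentence))
    # Full word segmentation; multi-syllable words are joined with "_"
    print(word_tokenize(sentence, format="text"))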