import datasets

_DESCRIPTION = """\
UTS_WTK
"""

_CITATION = """\
"""

_BASE_URL = "https://huggingface.co/datasets/undertheseanlp/UTS_WTK/resolve/main/data/"
TRAIN_FILE = "train.txt"
DEV_FILE = "validation.txt"
TEST_FILE = "test.txt"


class UTSWTKConfig(datasets.BuilderConfig):
    """BuilderConfig for UTS_WTK."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class UTSWTK(datasets.GeneratorBasedBuilder):
    """UTS Word Tokenize datasets"""
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        UTSWTKConfig(
            name="small", version=VERSION, description="UTS_WTK Small"),
        # UTSWTKConfig(
        #     name="base", version=VERSION, description="UTS_WTK Base"),
        # UTSWTKConfig(
        #     name="large", version=VERSION, description="UTS_WTK Large"),
    ]

    BUILDER_CONFIG_CLASS = UTSWTKConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(names=["B-W", "I-W"])
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/undertheseanlp/UTS_WTK",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/validation/test splits."""
        subset_folder = self.config.name
        # Join URL parts with "/" instead of os.path.join, which would emit
        # backslashes on Windows and produce invalid URLs.
        train_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TRAIN_FILE}")
        dev_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{DEV_FILE}")
        test_file = dl_manager.download(f"{_BASE_URL}{subset_folder}/{TEST_FILE}")

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": dev_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_file},
            ),
        ]
        return splits

    def _generate_examples(self, filepath):
        """Yields (guid, example) pairs parsed from a CoNLL-style file."""
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            tags = []
            for line in f:
                if not line.strip():
                    # A blank line ends the current sentence.
                    if tokens:
                        yield guid, {
                            # "id": str(guid),
                            "tokens": tokens,
                            "tags": tags,
                        }
                        guid += 1
                        tokens = []
                        tags = []
                else:
                    # Each non-blank line is tab separated: "token<TAB>tag".
                    splits = line.strip().split("\t")
                    tokens.append(splits[0])
                    tags.append(splits[1])
            # Emit the final sentence when the file does not end with a
            # blank line, so the last example is not silently dropped.
            if tokens:
                yield guid, {
                    "tokens": tokens,
                    "tags": tags,
                }
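

if __name__ == "__main__":
    # Minimal usage sketch, assuming a `datasets` release that still supports
    # script-based loading (dataset scripts were removed in datasets 3.x) and
    # network access to _BASE_URL. The "small" config name comes from
    # BUILDER_CONFIGS above.
    ds = datasets.load_dataset(__file__, name="small")
    print(ds)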