Languages: Thai
holylovenia committed
Commit 78c67ab
1 Parent(s): 676ff74

Upload thaigov.py with huggingface_hub
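The commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of how such an upload is typically done with HfApi.upload_file (the repo id below is a placeholder, not taken from this page):

    from huggingface_hub import HfApi

    api = HfApi()  # assumes prior authentication, e.g. via `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="thaigov.py",   # local file to push
        path_in_repo="thaigov.py",      # destination path inside the repo
        repo_id="<namespace>/thaigov",  # placeholder dataset repo id
        repo_type="dataset",
        commit_message="Upload thaigov.py with huggingface_hub",
    )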

Files changed (1):
  thaigov.py (+196, -0)
thaigov.py ADDED
@@ -0,0 +1,196 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The dataset consists of individual news articles, each corresponding to a unique URL on the
Thai government website (https://www.thaigov.go.th/). Each raw file has the following structure:
a topic header, followed by the content of the news article, followed by a blank line and the source URL.
"""
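# Illustrative layout of one raw article file (a sketch based on the description
# above, not an actual corpus file):
#
#   <topic header>
#   <article body, one or more lines>
#   <blank line>
#   ที่มา : <source URL>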
import glob
import os
import re
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets
import jsonlines

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{,
  author    = {PyThaiNLP},
  title     = {thaigov-v2-corpus},
  journal   = {},
  volume    = {},
  year      = {2023},
  url       = {https://github.com/PyThaiNLP/thaigov-v2-corpus/tree/master},
  doi       = {},
  biburl    = {},
  bibsource = {}
}
"""

_DATASETNAME = "thaigov"

_DESCRIPTION = """\
This dataset is a corpus of news articles collected from the Thai government website (https://www.thaigov.go.th/).
"""

_HOMEPAGE = "https://github.com/PyThaiNLP/thaigov-v2-corpus/tree/master/data"

_LANGUAGES = ["tha"]

_LICENSE = Licenses.PDDL.value

_LOCAL = False

_URLS = {
    _DATASETNAME: "https://github.com/PyThaiNLP/thaigov-v2-corpus/archive/refs/heads/master.zip",
}

_SUPPORTED_TASKS = [Tasks.SUMMARIZATION]

_SOURCE_VERSION = "2.0.0"

_SEACROWD_VERSION = "2024.06.20"


class ThaigovDataset(datasets.GeneratorBasedBuilder):
    """Corpus of news articles from ThaiGov; can be used for summarization tasks."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="thaigov_source",
            version=SOURCE_VERSION,
            description="thaigov source schema",
            schema="source",
            subset_id="thaigov",
        ),
        SEACrowdConfig(
            name="thaigov_seacrowd_t2t",
            version=SEACROWD_VERSION,
            description="thaigov SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id="thaigov",
        ),
    ]

    DEFAULT_CONFIG_NAME = "thaigov_source"

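    # Example usage (a sketch; on recent versions of the `datasets` library,
    # `trust_remote_code=True` may be required to run a local loading script):
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("thaigov.py", name="thaigov_seacrowd_t2t", split="train")
    #   print(ds[0]["text_1"][:80], "->", ds[0]["text_2"])
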
    def _info(self) -> datasets.DatasetInfo:

        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "src": datasets.Value("string"),
                    "tgt": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)
        # Articles are stored by the date they were scraped, following the pattern data/year/month/day/{article_name}.txt
        list_all_txt_files = list(glob.glob(os.path.join(data_dir, "thaigov-v2-corpus-master", "data", "*", "*", "*", "*.txt")))
        all_data = []
        for counter, txt_path in enumerate(list_all_txt_files):
            d = self._read_file(txt_path)
            all_data.append({"id": counter, "src": d["context"], "tgt": d["title"], "url": d["url"]})

        self._write_jsonl(os.path.join(data_dir, "train.jsonl"), all_data)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                    "split": "train",
                },
            ),
        ]

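    # Each row written to train.jsonl has this shape (values are hypothetical):
    #   {"id": 0, "src": "<article body>", "tgt": "<article title>", "url": "<source URL>"}
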
    def _generate_examples(self, filepath: Path, split: str) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""

        if self.config.schema == "source":
            with jsonlines.open(filepath) as f:
                for i, each_data in enumerate(f.iter()):
                    ex = {
                        "id": each_data["id"],
                        "src": each_data["src"],
                        "tgt": each_data["tgt"],
                        "url": each_data["url"],
                    }
                    yield i, ex

        elif self.config.schema == "seacrowd_t2t":
            with jsonlines.open(filepath) as f:
                for i, each_data in enumerate(f.iter()):
                    ex = {
                        "id": each_data["id"],
                        "text_1": each_data["src"],
                        "text_2": each_data["tgt"],
                        "text_1_name": "input_document",
                        "text_2_name": "output_summary",
                    }
                    yield i, ex

    def _read_file(self, path):
        """Parses one raw article file into its title, body text, and source URL."""
        text = {"title": "", "context": "", "url": ""}
        page_view_line = 0
        with open(path, "r", encoding="utf-8-sig") as f:
            for n, line in enumerate(f):
                line = line.strip()
                if n == 0:  # first line is the title
                    text["title"] = line
                else:
                    if line:
                        if re.match(r"^[\d,]+$", line):  # page-view counter
                            page_view_line = n
                            continue
                        # Skip the 'print' button text and anything after the page-view counter
                        if line == "พิมพ์" or (page_view_line and page_view_line < n):
                            continue
                        if re.match(r"^ที่มา : http", line):  # 'source:' line carrying the article URL
                            text["url"] = line.split(" ")[-1]
                        else:
                            text["context"] += line.replace("\xa0", "") + "\n"
        return text

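    # For a file shaped like the sketch in the module docstring, _read_file
    # returns roughly (hypothetical values):
    #   {"title": "<topic header>", "context": "<body text>\n", "url": "<source URL>"}
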
    def _write_jsonl(self, filepath, values):
        with jsonlines.open(filepath, "w") as writer:
            for row in values:
                writer.write(row)