Languages: Khmer
holylovenia committed · commit 8034d56 · verified · 1 parent: aaf1e17

Upload sleukrith_ocr.py with huggingface_hub
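
The commit title indicates the file was pushed with the huggingface_hub client. A minimal sketch of how such an upload is typically done with HfApi.upload_file; the repo_id shown here is an assumption, not taken from this page:

    from huggingface_hub import HfApi

    api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
    api.upload_file(
        path_or_fileobj="sleukrith_ocr.py",   # local file to push
        path_in_repo="sleukrith_ocr.py",      # destination path inside the repo
        repo_id="SEACrowd/sleukrith_ocr",     # assumed dataset repo id
        repo_type="dataset",
        commit_message="Upload sleukrith_ocr.py with huggingface_hub",
    )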

Files changed (1)
  1. sleukrith_ocr.py +249 -0
sleukrith_ocr.py ADDED
@@ -0,0 +1,249 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import struct
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import numpy as np
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{10.1145/3151509.3151510,
+ author = {Valy, Dona and Verleysen, Michel and Chhun, Sophea and Burie, Jean-Christophe},
+ title = {A New Khmer Palm Leaf Manuscript Dataset for Document Analysis and Recognition: SleukRith Set},
+ year = {2017},
+ isbn = {9781450353908},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ url = {https://doi.org/10.1145/3151509.3151510},
+ doi = {10.1145/3151509.3151510},
+ booktitle = {Proceedings of the 4th International Workshop on Historical Document Imaging and Processing},
+ pages = {1-6},
+ numpages = {6},
+ location = {Kyoto, Japan},
+ series = {HIP '17}
+ }
+ """
+
+ _DATASETNAME = "sleukrith_ocr"
+
+ _DESCRIPTION = """\
+ SleukRith Set is the first dataset specifically created for Khmer palm leaf
+ manuscripts. The dataset consists of annotated data from 657 pages of digitized
+ palm leaf manuscripts which are selected arbitrarily from a large collection of
+ existing and also recently digitized images. The dataset contains three types of
+ data: isolated characters, words, and lines. Each type of data is annotated with
+ the ground truth information which is very useful for evaluating and serving as
+ a training set for common document analysis tasks such as character/text
+ recognition, word/line segmentation, and word spotting.
+
+ The character mapping (per label) is not explained anywhere in the dataset homepage,
+ thus the labels are simply numbered from 0 to 110, each corresponds to a distinct character.
+ """
+
+ _HOMEPAGE = "https://github.com/donavaly/SleukRith-Set"
+
+ _LANGUAGES = ["khm"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     # this URL corresponds to the raw unprocessed data (whole images); unused in this dataloader
+     "sleukrith-set": {
+         "images": "https://drive.google.com/uc?export=download&id=19JIxAjjXWuJ7mEyUl5-xRr2B8uOb-GKk",  # 1GB
+         "annotated-data": "https://drive.google.com/uc?export=download&id=1Xi5ucRUb1e9TUU-nv2rCUYv2ANVsXYDk",  # 11.7MB
+     },
+     # this URL corresponds to the processed data (per characters); used in this dataloader
+     "isolated-characters": {
+         "images_train": "https://drive.google.com/uc?export=download&id=1KXf5937l-Xu_sXsGPuQOgFt4zRaXlSJ5",  # 249MB
+         "images_test": "https://drive.google.com/uc?export=download&id=1KSt5AiRIilRryh9GBcxyUUhnbiScdQ-9",  # 199MB
+         "labels_train": "https://drive.google.com/uc?export=download&id=1IbmLg-4l-3BtRhprDWWvZjCp7lqap0Z-",  # 442KB
+         "labels_test": "https://drive.google.com/uc?export=download&id=1GYcaUInkxtuuQps-qA38u-4zxK7HgrAB",  # 354KB
+     },
+ }
+
+ _SUPPORTED_TASKS = [Tasks.OPTICAL_CHARACTER_RECOGNITION]
+ _SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # imtext
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class SleukRithSet(datasets.GeneratorBasedBuilder):
+     """Annotated OCR dataset from 657 pages of digitized Khmer palm leaf manuscripts."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{_SEACROWD_SCHEMA}",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema=_SEACROWD_SCHEMA,
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "image_path": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=[i for i in range(111)]),
+                 }
+             )
+         elif self.config.schema == _SEACROWD_SCHEMA:
+             features = schemas.image_text_features(label_names=[i for i in range(111)])
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def module_exists(self, module_name):
+         try:
+             __import__(module_name)
+         except ImportError:
+             return False
+         else:
+             return True
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         # check if gdown is installed
+         if self.module_exists("gdown"):
+             import gdown
+         else:
+             raise ImportError("Please install `gdown` to enable downloading data from google drive.")
+
+         # create custom data directory
+         data_dir = Path.cwd() / "data" / "sleukrith_ocr"
+         data_dir.mkdir(parents=True, exist_ok=True)
+
+         # reliable google drive downloader
+         data_paths = {}
+         for key, value in _URLS["isolated-characters"].items():
+             idx = value.rsplit("=", maxsplit=1)[-1]
+             output = f"{data_dir}/{key}"
+             data_paths[key] = Path(output)
+
+             if not Path(output).exists():
+                 gdown.download(id=idx, output=output)
+             else:
+                 print(f"File {output} already exists, skipping download.")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "image_data": data_paths["images_train"],
+                     "label_data": data_paths["labels_train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "image_data": data_paths["images_test"],
+                     "label_data": data_paths["labels_test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, image_data: Path, label_data: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         # check if PIL is installed
+         if self.module_exists("PIL"):
+             from PIL import Image
+         else:
+             raise ImportError("Please install `pillow` to process images.")
+
+         # load images
+         with open(image_data, "rb") as file:
+             # read and unpack the first 12 bytes for metadata
+             width, height, nb_samples = struct.unpack(">iii", file.read(12))
+
+             images = []
+             for _ in range(nb_samples):
+                 # read and convert binary data to np array and reshape
+                 image_data = file.read(width * height)
+                 image = np.frombuffer(image_data, dtype=np.uint8).reshape((height, width))
+                 images.append(image)
+
+         # save images and store path
+         image_paths = []
+         for i, image in enumerate(images):
+             image_dir = Path.cwd() / "data" / "sleukrith_ocr" / split
+             image_dir.mkdir(exist_ok=True)
+             image_path = f"{image_dir}/image_{i}.png"
+
+             if not Path(image_path).exists():
+                 Image.fromarray(image).save(image_path)
+
+             assert Path(image_path).exists(), f"Image {image_path} not found."
+             image_paths.append(image_path)
+
+         # load labels
+         with open(label_data, "rb") as file:
+             # read and unpack the first 8 bytes for nb_classes and nb_samples
+             nb_classes, nb_samples = struct.unpack(">ii", file.read(8))
+             assert nb_samples == len(image_paths), "Number of labels do not match number of images."
+
+             labels = []
+             for _ in range(nb_samples):
+                 (label,) = struct.unpack(">i", file.read(4))
+                 assert 0 <= label < nb_classes, f"Label {label} out of bounds."
+                 labels.append(label)
+
+         if self.config.schema == "source":
+             for idx, example in enumerate(zip(image_paths, labels)):
+                 yield idx, {
+                     "image_path": example[0],
+                     "label": example[1],
+                 }
+
+         elif self.config.schema == _SEACROWD_SCHEMA:
+             for idx, example in enumerate(zip(image_paths, labels)):
+                 yield idx, {
+                     "id": str(idx),
+                     "image_paths": [example[0]],
+                     "texts": None,
+                     "metadata": {
+                         "context": None,
+                         "labels": [example[1]],
+                     },
+                 }
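
For reference, once this script sits in a dataset repository it can be built through the datasets library using the config names declared in BUILDER_CONFIGS above. A minimal sketch, assuming the repo id SEACrowd/sleukrith_ocr and a datasets version that supports script-based loading:

    import datasets

    # source schema: one image path plus an integer class label (0-110) per example
    ds = datasets.load_dataset(
        "SEACrowd/sleukrith_ocr",      # assumed repo id hosting this script
        name="sleukrith_ocr_source",   # DEFAULT_CONFIG_NAME defined in the script
        trust_remote_code=True,        # required for script-based datasets in recent releases
    )
    print(ds["train"][0])              # {"image_path": ".../image_0.png", "label": <0-110>}

Note that _split_generators fetches the Google Drive archives with gdown and _generate_examples decodes the packed binary format (a big-endian width/height/count header followed by raw uint8 pixels) into PNGs under ./data/sleukrith_ocr/<split>/, so the first build needs network access, local disk space, and the optional gdown and pillow dependencies.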