dinhquangson committed on
Commit d02b41c
1 Parent(s): d7ac72c

Update FUNSD.py

Files changed (1)
  1. FUNSD.py +64 -56
FUNSD.py CHANGED
@@ -2,11 +2,13 @@
 
 import json
 import os
+import logging
 
 import datasets
 
 from PIL import Image
 import numpy as np
+
 from transformers import AutoTokenizer
 
 logger = datasets.logging.get_logger(__name__)
@@ -27,14 +29,15 @@ _DESCRIPTION = """\
 https://guillaumejaume.github.io/FUNSD/
 """
 
-def load_image(image_path):
+def load_image(image_path, size=None):
     image = Image.open(image_path).convert("RGB")
     w, h = image.size
-    # resize image to 224x224
-    image = image.resize((224, 224))
-    image = np.asarray(image)
-    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
-    image = image.transpose(2, 0, 1)  # move channels to first dimension
+    if size is not None:
+        # resize image
+        image = image.resize((size, size))
+    image = np.asarray(image)
+    image = image[:, :, ::-1]  # flip color channels from RGB to BGR
+    image = image.transpose(2, 0, 1)  # move channels to first dimension
     return image, (w, h)
 
 def simplify_bbox(bbox):
@@ -99,6 +102,14 @@ class Funsd(datasets.GeneratorBasedBuilder):
                             "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
                         }
                     ),
+                    "original_image": datasets.features.Image(),
+                    "entities": datasets.Sequence(
+                        {
+                            "start": datasets.Value("int64"),
+                            "end": datasets.Value("int64"),
+                            "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]),
+                        }
+                    ),
                     "relations": datasets.Sequence(
                         {
                             "head": datasets.Value("int64"),
@@ -131,33 +142,33 @@ class Funsd(datasets.GeneratorBasedBuilder):
         ann_dir = os.path.join(filepath, "annotations")
         img_dir = os.path.join(filepath, "images")
        for guid, file in enumerate(sorted(os.listdir(ann_dir))):
-            words = []
-            bboxes = []
-            labels = []
+            doc_id = file.split(".")[0]
+            file_path = os.path.join(ann_dir, file)
+            with open(file_path, "r", encoding="utf8") as f:
+                document = json.load(f)
+            image_path = os.path.join(img_dir, file)
+            image_path = image_path.replace("json", "png")
+            image, size = load_image(image_path, size=224)
+            original_image, _ = load_image(image_path)
+
+
+            document = document["form"]
             tokenized_doc = {"input_ids": [], "bbox": [], "labels": []}
             entities = []
             relations = []
+            # image id to label dict
             id2label = {}
             entity_id_to_index_map = {}
             empty_entity = set()
-            file_path = os.path.join(ann_dir, file)
-            with open(file_path, "r", encoding="utf8") as f:
-                data = json.load(f)
-            image_path = os.path.join(img_dir, file)
-            image_path = image_path.replace("json", "png")
-            image, size = load_image(image_path)
-            for item in data["form"]:
-                words_example, label = item["words"], item["label"]
-                words_example = [w for w in words_example if w["text"].strip() != ""]
-                if len(words_example) == 0:
-                    continue
-                if len(item["text"]) == 0:
-                    empty_entity.add(item["id"])
+            for line in document:
+                # add entries whose text is empty to empty_entity
+                if len(line["text"]) == 0:
+                    empty_entity.add(line["id"])
                     continue
-                id2label[item["id"]] = item["label"]
-                relations.extend([tuple(sorted(l)) for l in item["linking"]])
+                id2label[line["id"]] = line["label"]
+                relations.extend([tuple(sorted(l)) for l in line["linking"]])
                 tokenized_inputs = self.tokenizer(
-                    item["text"],
+                    line["text"],
                     add_special_tokens=False,
                     return_offsets_mapping=True,
                     return_attention_mask=False,
@@ -165,6 +176,7 @@ class Funsd(datasets.GeneratorBasedBuilder):
                 text_length = 0
                 ocr_length = 0
                 bbox = []
+                last_box = None
                 for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]):
                     if token_id == 6:
                         bbox.append(None)
@@ -172,7 +184,9 @@ class Funsd(datasets.GeneratorBasedBuilder):
                     text_length += offset[1] - offset[0]
                     tmp_box = []
                     while ocr_length < text_length:
-                        ocr_word = item["words"].pop(0)
+                        if len(line["words"]) == 0:
+                            break
+                        ocr_word = line["words"].pop(0)
                         ocr_length += len(
                             self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip())
                         )
@@ -180,44 +194,28 @@ class Funsd(datasets.GeneratorBasedBuilder):
                     if len(tmp_box) == 0:
                         tmp_box = last_box
                     bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
-                    last_box = tmp_box  # noqa
-
+                    last_box = tmp_box
                 bbox = [
                     [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b
                     for i, b in enumerate(bbox)
                 ]
-                tokenized_inputs.update({"bbox": bbox, "labels": item["label"].upper()})
-
-
-                if item["label"] == "other":
-                    for w in words_example:
-                        words.append(w["text"])
-                        labels.append("O")
-                        bboxes.append(normalize_bbox(w["box"], size))
-                    #label = ["O"] * len(bbox)
+                if line["label"] == "other":
+                    label = ["O"] * len(bbox)
                 else:
-                    words.append(words_example[0]["text"])
-                    labels.append("B-" + item["label"].upper())
-                    bboxes.append(normalize_bbox(words_example[0]["box"], size))
-                    for w in words_example[1:]:
-                        words.append(w["text"])
-                        labels.append("I-" + item["label"].upper())
-                        bboxes.append(normalize_bbox(w["box"], size))
-                    #label = [f"I-{item['label'].upper()}"] * len(bbox)
-                    #label[0] = f"B-{item['label'].upper()}"
-
+                    label = [f"I-{line['label'].upper()}"] * len(bbox)
+                    label[0] = f"B-{line['label'].upper()}"
+                tokenized_inputs.update({"bbox": bbox, "labels": label})
                 if label[0] != "O":
-                    entity_id_to_index_map[item["id"]] = len(entities)
-                    entities.append(
+                    entity_id_to_index_map[line["id"]] = len(entities)
+                    entities.append(  # determine the number of tokens within the text and their start and end index
                         {
-                            "start": len(tokenized_doc["input_ids"]),
-                            "end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]),
-                            "label": item["label"].upper(),
+                            "start": len(tokenized_doc["input_ids"]),  # start index of the text's tokens, e.g. 0 for "hello world" tokenized as ["hello", "world"]
+                            "end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]),  # end index of the text's tokens; this will be 2 for "hello world"
+                            "label": line["label"].upper(),  # label of the text
                         }
                     )
                 for i in tokenized_doc:
                     tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i]
-
             relations = list(set(relations))
             relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity]
             kvrelations = []
@@ -255,12 +253,13 @@ class Funsd(datasets.GeneratorBasedBuilder):
             )
             chunk_size = 512
             for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)):
+                item = {}
                 for k in tokenized_doc:
                     item[k] = tokenized_doc[k][index : index + chunk_size]
                 entities_in_this_span = []
                 global_to_local_map = {}
                 for entity_id, entity in enumerate(entities):
-                    if (
+                    if (  # what happens if part of an entity's text falls in one chunk and the rest in the next chunk?
                         index <= entity["start"] < index + chunk_size
                         and index <= entity["end"] < index + chunk_size
                     ):
@@ -270,7 +269,7 @@ class Funsd(datasets.GeneratorBasedBuilder):
                         entities_in_this_span.append(entity)
                 relations_in_this_span = []
                 for relation in relations:
-                    if (
+                    if (  # what happens if the question falls in one chunk and the answer in another chunk?
                         index <= relation["start_index"] < index + chunk_size
                         and index <= relation["end_index"] < index + chunk_size
                     ):
@@ -282,5 +281,14 @@ class Funsd(datasets.GeneratorBasedBuilder):
                                 "end_index": relation["end_index"] - index,
                             }
                         )
-            yield guid,{"id": str(guid), "words": words, "bboxes": bboxes, "labels": labels, "image": image, "entities": entities, "relations": relations}
+                item.update(
+                    {
+                        "id": f"{doc_id}_{chunk_id}",
+                        "image": image,
+                        "original_image": original_image,
+                        "entities": entities_in_this_span,
+                        "relations": relations_in_this_span,
+                    }
+                )
+                yield f"{doc_id}_{chunk_id}", item
 
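For reference, a minimal, self-contained sketch of the updated load_image behaviour (the helper name load_image_sketch and the dummy white page are illustrative, not part of the commit; the sketch takes a PIL image instead of a path so it needs no file on disk). The call with size=224 produces the (3, 224, 224) BGR array used for the "image" feature, while the call without size keeps the original resolution, as used for the new "original_image" feature.

import numpy as np
from PIL import Image

def load_image_sketch(image, size=None):
    # mirrors the new load_image(): optional square resize, RGB -> BGR, HWC -> CHW
    image = image.convert("RGB")
    w, h = image.size
    if size is not None:
        image = image.resize((size, size))
    arr = np.asarray(image)
    arr = arr[:, :, ::-1]           # flip color channels from RGB to BGR
    arr = arr.transpose(2, 0, 1)    # move channels to first dimension
    return arr, (w, h)

page = Image.new("RGB", (762, 1000), color="white")   # stand-in for a scanned FUNSD form
image, size = load_image_sketch(page, size=224)       # shape (3, 224, 224)
original_image, _ = load_image_sketch(page)           # shape (3, 1000, 762)
print(image.shape, original_image.shape, size)        # (3, 224, 224) (3, 1000, 762) (762, 1000)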
 
 
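A hedged usage sketch for the updated script (the local script path, the "train" split name, and the trust_remote_code flag are assumptions, not shown in this diff; trust_remote_code may only be needed on recent datasets versions). With this change, each yielded example is one 512-token chunk of a document rather than a whole form.

from datasets import load_dataset

dataset = load_dataset("./FUNSD.py", split="train", trust_remote_code=True)  # hypothetical path/split

sample = dataset[0]
print(sample["id"])              # now "<doc_id>_<chunk_id>" instead of the old str(guid)
print(list(sample.keys()))       # should include "image", "original_image", "entities", "relations"
print(len(sample["input_ids"]))  # at most 512 tokens per chunk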