nglaura committed
Commit c8daf04 · Parent(s): 17ba79b

Create hal-summarization.py

Files changed (1): hal-summarization.py (+134, -0)
hal-summarization.py ADDED
@@ -0,0 +1,134 @@
+import json
+import os
+
+import datasets
+from tqdm import tqdm
+
+
+# Feature-column names shared between _info() and _generate_examples().
+_ARTICLE_ID = "article_id"
+_ARTICLE_WORDS = "article_words"
+_ARTICLE_BBOXES = "article_bboxes"
+_ARTICLE_NORM_BBOXES = "article_norm_bboxes"
+_ABSTRACT = "abstract"
+_ARTICLE_PDF_URL = "article_pdf_url"
+
+
+def normalize_bbox(bbox, size):
+    """Scale an (x0, y0, x1, y1) box from page coordinates to a 0-1000 grid."""
+    return [
+        int(1000 * bbox[0] / size[0]),
+        int(1000 * bbox[1] / size[1]),
+        int(1000 * bbox[2] / size[0]),
+        int(1000 * bbox[3] / size[1]),
+    ]
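+# For example, assuming a 612x792-point US Letter page, the box
+# [100, 200, 300, 400] maps to [163, 252, 490, 505] on the 0-1000 grid.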
+
+
+class HALSummarizationConfig(datasets.BuilderConfig):
+    """BuilderConfig for HALSummarization."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for HALSummarization.
+
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(HALSummarizationConfig, self).__init__(**kwargs)
+
+
+class HALSummarizationDataset(datasets.GeneratorBasedBuilder):
+    """HALSummarization Dataset."""
+
+    # Archives of per-article layout files and JSON-lines abstract files,
+    # resolved relative to the dataset repository.
+    _TRAIN_ARCHIVE = "train.zip"
+    _VAL_ARCHIVE = "val.zip"
+    _TEST_ARCHIVE = "test.zip"
+    _TRAIN_ABSTRACTS = "train.txt"
+    _VAL_ABSTRACTS = "validation.txt"
+    _TEST_ABSTRACTS = "test.txt"
+
+    BUILDER_CONFIGS = [
+        HALSummarizationConfig(
+            name="hal",
+            version=datasets.Version("1.0.0"),
+            description="HAL dataset for summarization",
+        ),
+    ]
+
+    def _info(self):
+        # Words and the two bounding-box columns are parallel sequences,
+        # one entry per token; the abstract is the summarization target.
+        return datasets.DatasetInfo(
+            features=datasets.Features(
+                {
+                    _ARTICLE_ID: datasets.Value("string"),
+                    _ARTICLE_WORDS: datasets.Sequence(datasets.Value("string")),
+                    _ARTICLE_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+                    _ARTICLE_NORM_BBOXES: datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
+                    _ABSTRACT: datasets.Value("string"),
+                    _ARTICLE_PDF_URL: datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+        )
67
+
68
+ def _split_generators(self, dl_manager):
69
+
70
+ train_dir = os.path.join(dl_manager.download_and_extract(self._TRAIN_ARCHIVE), "train")
71
+ val_dir = os.path.join(dl_manager.download_and_extract(self._VAL_ARCHIVE), "val")
72
+ test_dir = os.path.join(dl_manager.download_and_extract(self._TEST_ARCHIVE), "test")
73
+
74
+ train_abstracts = dl_manager.download_and_extract(self._TRAIN_ABSTRACTS)
75
+ val_abstracts = dl_manager.download_and_extract(self._VAL_ABSTRACTS)
76
+ test_abstracts = dl_manager.download_and_extract(self._TEST_ABSTRACTS)
77
+
78
+ return [
79
+ datasets.SplitGenerator(
80
+ name=datasets.Split.TRAIN,
81
+ gen_kwargs={"data_path": train_dir, "abstract_path": train_abstracts}
82
+ ),
83
+ datasets.SplitGenerator(
84
+ name=datasets.Split.VALIDATION,
85
+ gen_kwargs={"data_path": val_dir, "abstract_path": val_abstracts}
86
+ ),
87
+ datasets.SplitGenerator(
88
+ name=datasets.Split.TEST,
89
+ gen_kwargs={"data_path": test_dir, "abstract_path": test_abstracts}
90
+ ),
91
+ ]
+
+    def _generate_examples(self, data_path, abstract_path):
+        """Generate HALSummarization examples."""
+        # The listing only sizes the progress bar; iteration is driven by the
+        # abstract file, one JSON object per line.
+        filenames = sorted(os.listdir(data_path))
+
+        guid = 0
+        with open(abstract_path, encoding="utf-8") as abstract_file:
+            for line in tqdm(abstract_file, total=len(filenames), desc=f"Reading files in {data_path}"):
+                guid += 1
+                item = json.loads(line)
+                fname = item["id"] + ".txt"
+                filepath = os.path.join(data_path, fname)
+
+                words = []
+                bboxes = []
+                norm_bboxes = []
+
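+                # Each line of the per-article file is tab-separated; the
+                # layout assumed from the parsing below is:
+                #   word <tab> x0 <tab> y0 <tab> x1 <tab> y1 <tab> page_width <tab> page_height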
+                with open(filepath, encoding="utf-8") as f:
+                    # Use `row`, not `line`, so the abstract-file loop
+                    # variable above is not shadowed.
+                    for row in f:
+                        splits = row.split("\t")
+                        word = splits[0]
+                        bbox = [int(b) for b in splits[1:5]]
+                        page_width, page_height = int(splits[5]), int(splits[6])
+                        norm_bbox = normalize_bbox(bbox, (page_width, page_height))
+
+                        words.append(word)
+                        bboxes.append(bbox)
+                        norm_bboxes.append(norm_bbox)
+
+                assert len(words) == len(bboxes)
+                assert len(bboxes) == len(norm_bboxes)
+
+                yield guid, {
+                    _ARTICLE_ID: item["id"],
+                    _ARTICLE_WORDS: words,
+                    _ARTICLE_BBOXES: bboxes,
+                    _ARTICLE_NORM_BBOXES: norm_bboxes,
+                    _ABSTRACT: item["abstract"],
+                    _ARTICLE_PDF_URL: item["pdf_url"],
+                }
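For reference, a minimal sketch of how the finished loader would typically be consumed. The repo id "nglaura/hal-summarization" is an assumption inferred from the commit author and the script name, and recent versions of datasets require trust_remote_code=True for script-based loaders:

from datasets import load_dataset

# Hypothetical repo id, inferred from this commit's author and filename.
dataset = load_dataset("nglaura/hal-summarization", trust_remote_code=True)

sample = dataset["train"][0]
print(sample["article_id"], sample["article_words"][:5])
print(sample["article_norm_bboxes"][:5])  # one 0-1000 box per word
print(sample["abstract"][:200])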