youssef101 committed on
Commit
05a6dc1
1 Parent(s): 1865834

Upload artelingo.py

Browse files
Files changed (1) hide show
  1. artelingo.py +204 -0
artelingo.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import datasets
4
+ import pandas as pd
5
+ from PIL import Image
6
+
7
+
8
class ArtelingoBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for ArtELingo that records which splits a configuration exposes.

    Args:
        name: configuration name (e.g. 'artelingo', 'dev', 'wecia-emo').
        splits: list of split names this configuration should build.
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, name, splits, **kwargs):
        # Record the split list first; the base initializer only needs name/kwargs.
        self.splits = splits
        super().__init__(name, **kwargs)
13
+
14
+
15
# Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
# EMNLP 2022 paper introducing the dataset.
_CITATION = """\
@inproceedings{mohamed2022artelingo,
title={ArtELingo: A Million Emotion Annotations of WikiArt with Emphasis on Diversity over Language and Culture},
author={Mohamed, Youssef and Abdelfattah, Mohamed and Alhuwaider, Shyma and Li, Feifan and Zhang, Xiangliang and Church, Kenneth and Elhoseiny, Mohamed},
booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
pages={8770--8785},
year={2022}
}
"""

# Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
ArtELingo is a benchmark and dataset having a collection of 80,000 artworks from WikiArt with 1.2 Million annotations in English, Arabic, and Chinese.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://www.artelingo.org/"

# Add the licence for the dataset here if you can find it
# Single string literal assembled with line continuations; shown to users as
# the dataset's license text, so its wording must not be altered.
_LICENSE = "Terms of Use: Before we are able to offer you access to the database, \
please agree to the following terms of use. After approval, you (the 'Researcher') \
receive permission to use the ArtELingo database (the 'Database') at King Abdullah \
University of Science and Technology (KAUST). In exchange for being able to join the \
ArtELingo community and receive such permission, Researcher hereby agrees to the \
following terms and conditions: [1.] The Researcher shall use the Database only for \
non-commercial research and educational purposes. [2.] The Universities make no \
representations or warranties regarding the Database, including but not limited to \
warranties of non-infringement or fitness for a particular purpose. [3.] Researcher \
accepts full responsibility for his or her use of the Database and shall defend and \
indemnify the Universities, including their employees, Trustees, officers and agents, \
against any and all claims arising from Researcher's use of the Database, and \
Researcher's use of any copies of copyrighted 2D artworks originally uploaded to \
http://www.wikiart.org that the Researcher may use in connection with the Database. \
[4.] Researcher may provide research associates and colleagues with access to the \
Database provided that they first agree to be bound by these terms and conditions. \
[5.] The Universities reserve the right to terminate Researcher's access to the Database \
at any time. [6.] If Researcher is employed by a for-profit, commercial entity, \
Researcher's employer shall also be bound by these terms and conditions, and Researcher \
hereby represents that he or she is fully authorized to enter into this agreement on \
behalf of such employer. [7.] The international copyright laws shall apply to all \
disputes under this agreement."

# Add link to the official dataset URLs here
# The HuggingFace dataset library don't host the datasets but only point to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)

# This script can work with local (downloaded) files.
# Keys are either plain split names ('train'/'val'/'test') or
# '<config-name>_<split>' for the WECIA challenge archives; `_split_generators`
# builds the lookup key accordingly.
_URLs = {
    'val': 'https://artelingo.s3.amazonaws.com/val.zip',
    'test': 'https://artelingo.s3.amazonaws.com/test.zip',
    'train': 'https://artelingo.s3.amazonaws.com/train.zip',
    'wecia-emo_dev': 'https://artelingo.s3.amazonaws.com/wecia_emo_dev.zip',
    'wecia-cap_dev': 'https://artelingo.s3.amazonaws.com/wecia_cap_dev.zip',
    'wecia-emo_hidden': 'https://artelingo.s3.amazonaws.com/wecia_emo_hidden.zip',
    'wecia-cap_hidden': 'https://artelingo.s3.amazonaws.com/wecia_cap_hidden.zip',
}

# _URL_ANN = "https://artelingo.s3.amazonaws.com/artelingo_release_lite.csv"


# The nine emotion labels used by ArtELingo. Currently unused at runtime
# (the 'emotion' feature is a plain string, see the commented-out ClassLabel
# in `Artelingo._info`); kept for reference.
_EMOTIONS = ['contentment', 'awe', 'amusement', 'excitement', 'sadness', 'fear', 'anger', 'disgust', 'something else']
79
+
80
# Name of the dataset usually match the script name with CamelCase instead of snake_case
class Artelingo(datasets.GeneratorBasedBuilder):
    """Dataset builder for ArtELingo: WikiArt images with multilingual emotion annotations.

    Configurations:
      * 'artelingo'  -- the full dataset (train/val/test splits).
      * 'dev'        -- only the val and test splits.
      * 'wecia-emo'  -- dev set of the WECIA Emotion Prediction challenge
                        (emotion/language fields withheld as None).
      * 'wecia-cap'  -- dev set of the WECIA Affective Caption Generation
                        challenge (text field withheld as None).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ArtelingoBuilderConfig(name='artelingo', splits=['train', 'val', 'test'],
                               version=VERSION, description="The full ArtELingo dataset"),
        ArtelingoBuilderConfig(name='dev', splits=['val', 'test'],
                               version=VERSION, description="The Test and Val subsets of ArtELingo"),
        ArtelingoBuilderConfig(name='wecia-emo', splits=['dev'],
                               version=VERSION, description="The Dev set of the WECIA Emotion Prediction challenge"),
        ArtelingoBuilderConfig(name='wecia-cap', splits=['dev'],
                               version=VERSION, description="The Dev set of the WECIA Affective Caption Generation challenge"),
    ]
    DEFAULT_CONFIG_NAME = "artelingo"

    def _info(self):
        """Return the DatasetInfo: feature schema plus citation/license/homepage."""
        features = datasets.Features({
            "uid": datasets.Value("int32"),
            "image": datasets.Image(),
            "art_style": datasets.Value("string"),
            "painting": datasets.Value("string"),
            # Kept as a free-form string rather than ClassLabel(names=_EMOTIONS)
            # so the WECIA configs can emit None for withheld labels.
            "emotion": datasets.Value("string"),
            "language": datasets.Value("string"),
            "text": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract each split archive, then return its SplitGenerator.

        Each extracted archive is expected to contain a `<split>/` directory
        holding the images and a `metadata.csv` describing them.
        """
        data_dir = self.config.data_dir
        if data_dir is None:
            # WECIA archives are keyed as '<config-name>_<split>' in _URLs;
            # the plain dataset archives are keyed by split name alone.
            prefix = self.config.name + '_' if 'wecia' in self.config.name else ''
            data_dir = {
                split: dl_manager.download_and_extract(_URLs[prefix + split])
                for split in self.config.splits
            }
        # NOTE(review): a user-supplied config.data_dir must be a mapping from
        # split name to extracted directory (not a plain path string) for the
        # lookups below to work -- confirm against callers.
        return [
            datasets.SplitGenerator(
                name=split,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "metadata": os.path.join(data_dir[split], split, "metadata.csv"),
                    "image_dir": os.path.join(data_dir[split], split),
                },
            )
            for split in self.config.splits
        ]

    def _generate_examples(self, metadata, image_dir):
        """Yield (key, example) tuples for one split.

        Args:
            metadata: path to the split's metadata.csv.
            image_dir: directory containing the split's image files.

        The key is the row position (legacy tfds requirement). The three
        near-identical loops of the original are collapsed into one: the
        config name decides which columns are read and which are withheld,
        so columns absent from a challenge CSV are never accessed.
        """
        name = self.config.name
        df = pd.read_csv(metadata)

        for row_idx, entry in enumerate(df.itertuples()):
            yield row_idx, {
                # WECIA CSVs ship an explicit 'uid' column; the full dataset
                # identifies rows by position.
                "uid": entry.uid if name in ('wecia-emo', 'wecia-cap') else row_idx,
                "image": Image.open(os.path.join(image_dir, entry.file_name)),
                "art_style": entry.art_style,
                "painting": entry.painting,
                # wecia-emo withholds the labels; wecia-cap withholds the caption.
                "emotion": None if name == 'wecia-emo' else entry.emotion,
                "language": None if name == 'wecia-emo' else entry.language,
                "text": None if name == 'wecia-cap' else entry.text,
            }