shivam committed on
Commit b4cca67 · 1 Parent(s): 7ff5f61

Initial Commit

Files changed (1)
  1. split-test.py +97 -195
split-test.py CHANGED
@@ -1,4 +1,3 @@
- # coding=utf-8
  # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,238 +11,141 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """The Tweet Eval Datasets"""


  import datasets


  _CITATION = """\
- @inproceedings{barbieri2020tweeteval,
-     title={{TweetEval:Unified Benchmark and Comparative Evaluation for Tweet Classification}},
-     author={Barbieri, Francesco and Camacho-Collados, Jose and Espinosa-Anke, Luis and Neves, Leonardo},
-     booktitle={Proceedings of Findings of EMNLP},
-     year={2020}
  }
  """

  _DESCRIPTION = """\
- TweetEval consists of seven heterogenous tasks in Twitter, all framed as multi-class tweet classification. All tasks have been unified into the same benchmark, with each dataset presented in the same format and with fixed training, validation and test splits.
  """

- _HOMEPAGE = "https://github.com/cardiffnlp/tweeteval"

  _LICENSE = ""

- URL = "https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/"
-
- _URLs = {
-     "emoji": {
-         "train_text": URL + "emoji/train_text.txt",
-         "train_labels": URL + "emoji/train_labels.txt",
-         "test_text": URL + "emoji/test_text.txt",
-         "test_labels": URL + "emoji/test_labels.txt",
-         "val_text": URL + "emoji/val_text.txt",
-         "val_labels": URL + "emoji/val_labels.txt",
-     },
-     "emotion": {
-         "train_text": URL + "emotion/train_text.txt",
-         "train_labels": URL + "emotion/train_labels.txt",
-         "test_text": URL + "emotion/test_text.txt",
-         "test_labels": URL + "emotion/test_labels.txt",
-         "val_text": URL + "emotion/val_text.txt",
-         "val_labels": URL + "emotion/val_labels.txt",
-     },
-     "hate": {
-         "train_text": URL + "hate/train_text.txt",
-         "train_labels": URL + "hate/train_labels.txt",
-         "test_text": URL + "hate/test_text.txt",
-         "test_labels": URL + "hate/test_labels.txt",
-         "val_text": URL + "hate/val_text.txt",
-         "val_labels": URL + "hate/val_labels.txt",
-     },
-     "irony": {
-         "train_text": URL + "irony/train_text.txt",
-         "train_labels": URL + "irony/train_labels.txt",
-         "test_text": URL + "irony/test_text.txt",
-         "test_labels": URL + "irony/test_labels.txt",
-         "val_text": URL + "irony/val_text.txt",
-         "val_labels": URL + "irony/val_labels.txt",
-     },
-     "offensive": {
-         "train_text": URL + "offensive/train_text.txt",
-         "train_labels": URL + "offensive/train_labels.txt",
-         "test_text": URL + "offensive/test_text.txt",
-         "test_labels": URL + "offensive/test_labels.txt",
-         "val_text": URL + "offensive/val_text.txt",
-         "val_labels": URL + "offensive/val_labels.txt",
-     },
-     "sentiment": {
-         "train_text": URL + "sentiment/train_text.txt",
-         "train_labels": URL + "sentiment/train_labels.txt",
-         "test_text": URL + "sentiment/test_text.txt",
-         "test_labels": URL + "sentiment/test_labels.txt",
-         "val_text": URL + "sentiment/val_text.txt",
-         "val_labels": URL + "sentiment/val_labels.txt",
-     },
-     "stance": {
-         "abortion": {
-             "train_text": URL + "stance/abortion/train_text.txt",
-             "train_labels": URL + "stance/abortion/train_labels.txt",
-             "test_text": URL + "stance/abortion/test_text.txt",
-             "test_labels": URL + "stance/abortion/test_labels.txt",
-             "val_text": URL + "stance/abortion/val_text.txt",
-             "val_labels": URL + "stance/abortion/val_labels.txt",
-         },
-         "atheism": {
-             "train_text": URL + "stance/atheism/train_text.txt",
-             "train_labels": URL + "stance/atheism/train_labels.txt",
-             "test_text": URL + "stance/atheism/test_text.txt",
-             "test_labels": URL + "stance/atheism/test_labels.txt",
-             "val_text": URL + "stance/atheism/val_text.txt",
-             "val_labels": URL + "stance/atheism/val_labels.txt",
-         },
-         "climate": {
-             "train_text": URL + "stance/climate/train_text.txt",
-             "train_labels": URL + "stance/climate/train_labels.txt",
-             "test_text": URL + "stance/climate/test_text.txt",
-             "test_labels": URL + "stance/climate/test_labels.txt",
-             "val_text": URL + "stance/climate/val_text.txt",
-             "val_labels": URL + "stance/climate/val_labels.txt",
-         },
-         "feminist": {
-             "train_text": URL + "stance/feminist/train_text.txt",
-             "train_labels": URL + "stance/feminist/train_labels.txt",
-             "test_text": URL + "stance/feminist/test_text.txt",
-             "test_labels": URL + "stance/feminist/test_labels.txt",
-             "val_text": URL + "stance/feminist/val_text.txt",
-             "val_labels": URL + "stance/feminist/val_labels.txt",
-         },
-         "hillary": {
-             "train_text": URL + "stance/hillary/train_text.txt",
-             "train_labels": URL + "stance/hillary/train_labels.txt",
-             "test_text": URL + "stance/hillary/test_text.txt",
-             "test_labels": URL + "stance/hillary/test_labels.txt",
-             "val_text": URL + "stance/hillary/val_text.txt",
-             "val_labels": URL + "stance/hillary/val_labels.txt",
-         },
-     },
  }

- class TweetEvalConfig(datasets.BuilderConfig):
-     def __init__(self, *args, type=None, sub_type=None, **kwargs):
-         super().__init__(
-             *args,
-             name=f"{type}" if type != "stance" else f"{type}_{sub_type}",
-             **kwargs,
          )
-         self.type = type
-         self.sub_type = sub_type


- class TweetEval(datasets.GeneratorBasedBuilder):
-     """TweetEval Dataset."""

      BUILDER_CONFIGS = [
-         TweetEvalConfig(
-             type=key,
-             sub_type=None,
-             version=datasets.Version("1.1.0"),
-             description=f"This part of my dataset covers {key} part of TweetEval Dataset.",
-         )
-         for key in list(_URLs.keys())
-         if key != "stance"
-     ] + [
-         TweetEvalConfig(
-             type="stance",
-             sub_type=key,
-             version=datasets.Version("1.1.0"),
-             description=f"This part of my dataset covers stance_{key} part of TweetEval Dataset.",
-         )
-         for key in list(_URLs["stance"].keys())
      ]

      def _info(self):
-         if self.config.type == "stance":
-             names = ["none", "against", "favor"]
-         elif self.config.type == "sentiment":
-             names = ["negative", "neutral", "positive"]
-         elif self.config.type == "offensive":
-             names = ["non-offensive", "offensive"]
-         elif self.config.type == "irony":
-             names = ["non_irony", "irony"]
-         elif self.config.type == "hate":
-             names = ["non-hate", "hate"]
-         elif self.config.type == "emoji":
-             names = [
-                 "❤",
-                 "😍",
-                 "😂",
-                 "💕",
-                 "🔥",
-                 "😊",
-                 "😎",
-                 "✨",
-                 "💙",
-                 "😘",
-                 "📷",
-                 "🇺🇸",
-                 "☀",
-                 "💜",
-                 "😉",
-                 "💯",
-                 "😁",
-                 "🎄",
-                 "📸",
-                 "😜",
-             ]
-
-         else:
-             names = ["anger", "joy", "optimism", "sadness"]

          return datasets.DatasetInfo(
              description=_DESCRIPTION,
-             features=datasets.Features(
-                 {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=names)}
-             ),
-             supervised_keys=None,
              homepage=_HOMEPAGE,
              license=_LICENSE,
              citation=_CITATION,
          )

      def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         if self.config.type != "stance":
-             my_urls = _URLs[self.config.type]
-         else:
-             my_urls = _URLs[self.config.type][self.config.sub_type]
-         data_dir = dl_manager.download_and_extract(my_urls)
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
                  # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"text_path": data_dir["train_text"], "labels_path": data_dir["train_labels"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"text_path": data_dir["test_text"], "labels_path": data_dir["test_labels"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"text_path": data_dir["val_text"], "labels_path": data_dir["val_labels"]},
-             ),
          ]

-     def _generate_examples(self, text_path, labels_path):
-         """Yields examples."""
-
-         with open(text_path, encoding="utf-8") as f:
-             texts = f.readlines()
-         with open(labels_path, encoding="utf-8") as f:
-             labels = f.readlines()
-         for i, text in enumerate(texts):
-             yield i, {"text": text.strip(), "label": int(labels[i].strip())}
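
For reference, the removed TweetEval builder exposed one config per task plus one per stance target, named "{type}" or "stance_{sub_type}" by TweetEvalConfig. A hedged usage sketch for the pre-commit script (the local path is illustrative; the config names come from the removed code):

import datasets

# Load one task config and one stance sub-config from the old script (path is wherever this file was saved).
emotion = datasets.load_dataset("./split-test.py", "emotion")
stance_abortion = datasets.load_dataset("./split-test.py", "stance_abortion")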
 
 
 
  # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");

  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
+ """Indo Wordnet dataset"""


+ import csv
+ import json
+ import os
+
  import datasets


+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
  _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
  }
  """

+ # TODO: Add description of the dataset here
+ # You can copy an official description
  _DESCRIPTION = """\
+ This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
  """

+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = ""

+ # TODO: Add the licence for the dataset here if you can find it
  _LICENSE = ""

+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
  }

+ _LANGS = {
+     "english": "en",
+     "hindi": "hi"
+ }

+ class SplitTestConfig(datasets.BuilderConfig):
+     """BuilderConfig for SplitTest."""
+
+     def __init__(self, name, **kwargs):
+         """
+         Args:
+             name: `string`, name of dataset config
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(SplitTestConfig, self).__init__(
+             version=datasets.Version("2.1.0", ""), name=name, **kwargs
          )


+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
+ class SplitTest(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig

+     # You will be able to load one or the other configurations in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
      BUILDER_CONFIGS = [
+         SplitTestConfig(name="english", version=VERSION, description="English"),
+         SplitTestConfig(name="hindi", version=VERSION, description="Hindi"),
      ]

+     DEFAULT_CONFIG_NAME = "english" # It's not mandatory to have a default configuration. Just use one if it make sense.
+
      def _info(self):

+         features = datasets.Features(
+             {
+                 "word": datasets.Value("string")
+             }
+         )
          return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
              description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features, # Here we define them above because they are different between the two configurations
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
              homepage=_HOMEPAGE,
+             # License for the dataset if available
              license=_LICENSE,
+             # Citation for the dataset
              citation=_CITATION,
          )

      def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+
+         file_name = "text." + _LANGS[self.config.name]
+
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
                  # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, file_name),
+                     "split": "train",
+                 },
+             )
          ]

+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         with open(filepath, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 data = json.loads(row)
+                 yield key, {
+                     "word": data["word"]
+                 }
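
As committed, _split_generators builds gen_kwargs from data_dir, which is never assigned (the template's dl_manager.download_and_extract call was dropped), and _URLS is still keyed by the placeholder "first_domain"/"second_domain" rather than by config name, so loading either config would fail. A minimal sketch of how the train split could be wired up, assuming _URLS were re-keyed by config name ("english"/"hindi") and each entry pointed at an archive containing text.en / text.hi; these URLs and the file layout are assumptions, not part of this commit:

    def _split_generators(self, dl_manager):
        # Assumption: _URLS maps config names ("english", "hindi") to downloadable archives.
        data_dir = dl_manager.download_and_extract(_URLS[self.config.name])
        file_name = "text." + _LANGS[self.config.name]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, file_name),
                    "split": "train",
                },
            )
        ]

Validation and test splits would be added the same way with additional datasets.SplitGenerator entries; the committed script defines only TRAIN.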