dfki-nlp committed
Commit e6eb656
1 Parent(s): 7879210

Update tacred.py

Files changed (1):
  tacred.py +39 -16
tacred.py CHANGED
@@ -1,6 +1,3 @@
-"""TODO: Add a description here."""
-
-
 import json
 import os
 
@@ -43,14 +40,24 @@ _CITATION = """\
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
+and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges.
+Examples in TACRED cover 41 relation types as used in the TAC KBP challenges (e.g., per:schools_attended
+and org:members) or are labeled as no_relation if no defined relation is held. These examples are created
+by combining available human annotations from the TAC KBP challenges and crowdsourcing.
+
+Please see our EMNLP paper, or our EMNLP slides for full details.
+
+Note: There is currently a label-corrected version of the TACRED dataset, which you should consider using instead of
+the original version released in 2017. For more details on this new version, see the TACRED Revisited paper
+published at ACL 2020.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
+_HOMEPAGE = "https://nlp.stanford.edu/projects/tacred/"
 
 # TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
+_LICENSE = "LDC"
 
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
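Note: TACRED is distributed through the LDC (hence the "LDC" license string), so the script cannot download the data itself. A minimal loading sketch, assuming the script follows the usual manual-download pattern and accepts a data_dir pointing at the LDC JSON files (the path below is hypothetical):

    import datasets

    # Hypothetical local path; the train/dev/test JSON files come from the
    # LDC release and are not downloaded by the script.
    ds = datasets.load_dataset("tacred.py", data_dir="path/to/tacred/data/json")
    print(ds["train"][0]["relation"])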
@@ -158,12 +165,20 @@ class TACRED(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = datasets.Features(
             {
+                "id": datasets.Value("string"),
+                "docid": datasets.Value("string"),
                 "tokens": datasets.Sequence(datasets.Value("string")),
-                "head_start": datasets.Value("int32"),
-                "head_end": datasets.Value("int32"),
-                "tail_start": datasets.Value("int32"),
-                "tail_end": datasets.Value("int32"),
-                "label": datasets.ClassLabel(names=_CLASS_LABELS),
+                "subj_start": datasets.Value("int32"),
+                "subj_end": datasets.Value("int32"),
+                "subj_type": datasets.Value("string"),
+                "obj_start": datasets.Value("int32"),
+                "obj_end": datasets.Value("int32"),
+                "obj_type": datasets.Value("string"),
+                "pos_tags": datasets.Sequence(datasets.Value("string")),
+                "ner_tags": datasets.Sequence(datasets.Value("string")),
+                "stanford_deprel": datasets.Sequence(datasets.Value("string")),
+                "stanford_head": datasets.Sequence(datasets.Value("int32")),
+                "relation": datasets.ClassLabel(names=_CLASS_LABELS),
             }
         )
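Since "relation" is a datasets.ClassLabel, examples store the relation as an integer index into _CLASS_LABELS rather than a string. A small sketch of the round-trip, using an illustrative three-name subset of the 41 labels:

    import datasets

    # Illustrative subset; the real _CLASS_LABELS in the script lists all
    # TACRED relation types plus no_relation.
    _CLASS_LABELS = ["no_relation", "per:schools_attended", "org:members"]

    relation = datasets.ClassLabel(names=_CLASS_LABELS)
    idx = relation.str2int("org:members")  # -> 2
    name = relation.int2str(idx)           # -> "org:members"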
 
@@ -248,10 +263,18 @@ class TACRED(datasets.GeneratorBasedBuilder):
             example.update(patch_examples[id_])
 
             yield id_, {
+                "id": example["id"],
+                "docid": example["docid"],
                 "tokens": [convert_ptb_token(token) for token in example["token"]],
-                "head_start": example["subj_start"],
-                "head_end": example["subj_end"] + 1,  # make end offset exclusive
-                "tail_start": example["obj_start"],
-                "tail_end": example["obj_end"] + 1,  # make end offset exclusive
-                "label": example["relation"],
+                "subj_start": example["subj_start"],
+                "subj_end": example["subj_end"] + 1,  # make end offset exclusive
+                "subj_type": example["subj_type"],
+                "obj_start": example["obj_start"],
+                "obj_end": example["obj_end"] + 1,  # make end offset exclusive
+                "obj_type": example["obj_type"],
+                "relation": example["relation"],
+                "pos_tags": example["stanford_pos"],
+                "ner_tags": example["stanford_ner"],
+                "stanford_deprel": example["stanford_deprel"],
+                "stanford_head": example["stanford_head"]
             }
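The +1 on subj_end and obj_end converts TACRED's inclusive end offsets to Python's exclusive-end convention, so entity spans can be sliced directly from the token list (convert_ptb_token presumably undoes PTB escapes such as -LRB- for "("). A sketch with made-up values:

    tokens = ["Douglas", "Flint", "will", "become", "chairman", "of", "HSBC"]
    subj_start, subj_end = 0, 2  # exclusive end, as emitted above
    obj_start, obj_end = 6, 7

    subj_tokens = tokens[subj_start:subj_end]  # ["Douglas", "Flint"]
    obj_tokens = tokens[obj_start:obj_end]     # ["HSBC"]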