dfki-nlp committed
Commit 40d3755
1 Parent(s): e6eb656

Update tacred.py

Files changed (1)
  1. tacred.py +8 -16
tacred.py CHANGED
@@ -1,3 +1,6 @@
+"""TODO: Add a description here."""
+
+
 import json
 import os
 
@@ -59,7 +62,8 @@ _HOMEPAGE = "https://nlp.stanford.edu/projects/tacred/"
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = "LDC"
 
-# TODO: Add link to the official dataset URLs here
+_URL = "https://catalog.ldc.upenn.edu/LDC2018T24"
+
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _PATCH_URLs = {
@@ -125,21 +129,10 @@ def convert_ptb_token(token: str) -> str:
     }.get(token.lower(), token)
 
 
-# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
-class TACRED(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+class Tacred(datasets.GeneratorBasedBuilder):
+    """TACRED is a large-scale relation extraction dataset with 106,264 examples built over newswire
+    and web text from the corpus used in the yearly TAC Knowledge Base Population (TAC KBP) challenges."""
 
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
             name="original", version=datasets.Version("1.0.0"), description="The original TACRED."
@@ -201,7 +194,6 @@ class TACRED(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
 
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
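
The template comments removed above showed how a configuration is picked at load time. The following is a minimal usage sketch, not part of the commit: the script path, the data_dir layout, and the split names are assumptions, since TACRED itself is distributed by the LDC (see _URL) and must be obtained separately.

# Minimal usage sketch (not part of this commit). The script path, the
# data_dir layout, and the split names are assumptions.
import datasets

tacred = datasets.load_dataset(
    "tacred.py",                           # assumed: path to this loading script or its Hub repo id
    name="original",                       # the config defined in BUILDER_CONFIGS
    data_dir="/path/to/tacred/data/json",  # assumed: directory with train.json, dev.json, test.json from LDC2018T24
)

print(tacred)  # expected: a DatasetDict with train/validation/test splits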
 
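
The comments retained around _split_generators describe the standard datasets pattern: the DownloadManager fetches and extracts remote files, and the method returns one SplitGenerator per split. Below is an illustrative sketch of that pattern only, not the actual body of tacred.py; the _PATCH_URLs placeholder, the JSON filenames, and the reliance on self.config.data_dir are assumptions.

# Illustrative sketch of the _split_generators pattern referred to in the
# comments above -- not the implementation in tacred.py. Filenames, the
# _PATCH_URLs placeholder, and the data_dir convention are assumptions.
import os
import datasets

_PATCH_URLs = {}  # placeholder; the real nested dict of patch URLs lives in tacred.py


def _split_generators(self, dl_manager):  # would be a method of the Tacred builder class
    """Returns SplitGenerators."""
    # Download and extract the remotely hosted patch files; the LDC JSON files
    # themselves are expected locally under self.config.data_dir.
    patch_files = dl_manager.download_and_extract(_PATCH_URLs)
    data_dir = os.path.abspath(self.config.data_dir)

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "filepath": os.path.join(data_dir, "train.json"),
                "patch_filepath": patch_files.get("train"),
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={
                "filepath": os.path.join(data_dir, "dev.json"),
                "patch_filepath": patch_files.get("dev"),
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                "filepath": os.path.join(data_dir, "test.json"),
                "patch_filepath": patch_files.get("test"),
            },
        ),
    ]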