lhoestq (HF staff) committed
Commit
95acb2e
1 Parent(s): 66905c4

Replace dbpedia_14 data url (#4022)


* replace dbpedia_14 data url

* update dummy data

Commit from https://github.com/huggingface/datasets/commit/59c631b24f19481f69820b84e5766ca1bb49d4a1
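
Since the point of the change is swapping the download host, the new archive can be sanity-checked against the checksum recorded in the updated dataset_infos.json below. A minimal sketch (the URL and sha256 value are copied from the metadata; the snippet itself is not part of this commit):

import hashlib
import urllib.request

# URL and expected sha256 are taken from the updated dataset_infos.json below.
URL = "https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz"
EXPECTED_SHA256 = "42db5221ddedddb673a4cabcc5f3a7d869714c878bcfe4ba94b29d14aa38e417"

digest, size = hashlib.sha256(), 0
with urllib.request.urlopen(URL) as resp:
    # Hash the archive in 1 MiB chunks without keeping it in memory.
    for chunk in iter(lambda: resp.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

assert digest.hexdigest() == EXPECTED_SHA256, "archive content has changed"
print(f"ok: {size} bytes")  # dataset_infos.json records 68341743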

dataset_infos.json CHANGED
@@ -1 +1 @@
- {"dbpedia_14": {"description": "The DBpedia ontology classification dataset is constructed by picking 14 non-overlapping classes\nfrom DBpedia 2014. They are listed in classes.txt. From each of thse 14 ontology classes, we\nrandomly choose 40,000 training samples and 5,000 testing samples. Therefore, the total size\nof the training dataset is 560,000 and testing dataset 70,000.\nThere are 3 columns in the dataset (same for train and test splits), corresponding to class index\n(1 to 14), title and content. The title and content are escaped using double quotes (\"), and any\ninternal double quote is escaped by 2 double quotes (\"\"). There are no new lines in title or content.\n", "citation": "@article{lehmann2015dbpedia,\n title={DBpedia--a large-scale, multilingual knowledge base extracted from Wikipedia},\n author={Lehmann, Jens and Isele, Robert and Jakob, Max and Jentzsch, Anja and Kontokostas, \n Dimitris and Mendes, Pablo N and Hellmann, Sebastian and Morsey, Mohamed and Van Kleef, \n Patrick and Auer, S{\"o}ren and others},\n journal={Semantic web},\n volume={6},\n number={2},\n pages={167--195},\n year={2015},\n publisher={IOS Press}\n}\n", "homepage": "https://wiki.dbpedia.org/develop/datasets", "license": "Creative Commons Attribution-ShareAlike 3.0 and the GNU Free Documentation License", "features": {"label": {"num_classes": 14, "names": ["Company", "EducationalInstitution", "Artist", "Athlete", "OfficeHolder", "MeanOfTransportation", "Building", "NaturalPlace", "Village", "Animal", "Plant", "Album", "Film", "WrittenWork"], "names_file": null, "id": null, "_type": "ClassLabel"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "d_bpedia14", "config_name": "dbpedia_14", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 178429418, "num_examples": 560000, "dataset_name": "d_bpedia14"}, "test": {"name": "test", "num_bytes": 22310341, "num_examples": 70000, "dataset_name": "d_bpedia14"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k": {"num_bytes": 68341698, "checksum": "cad5773f85d7501bb2783833768bc624641cdddf7056000a06f12bcd0239a310"}}, "download_size": 68341698, "post_processing_size": null, "dataset_size": 200739759, "size_in_bytes": 269081457}}
+ {"dbpedia_14": {"description": "The DBpedia ontology classification dataset is constructed by picking 14 non-overlapping classes\nfrom DBpedia 2014. They are listed in classes.txt. From each of thse 14 ontology classes, we\nrandomly choose 40,000 training samples and 5,000 testing samples. Therefore, the total size\nof the training dataset is 560,000 and testing dataset 70,000.\nThere are 3 columns in the dataset (same for train and test splits), corresponding to class index\n(1 to 14), title and content. The title and content are escaped using double quotes (\"), and any\ninternal double quote is escaped by 2 double quotes (\"\"). There are no new lines in title or content.\n", "citation": "@article{lehmann2015dbpedia,\n title={DBpedia--a large-scale, multilingual knowledge base extracted from Wikipedia},\n author={Lehmann, Jens and Isele, Robert and Jakob, Max and Jentzsch, Anja and Kontokostas,\n Dimitris and Mendes, Pablo N and Hellmann, Sebastian and Morsey, Mohamed and Van Kleef,\n Patrick and Auer, S{\"o}ren and others},\n journal={Semantic web},\n volume={6},\n number={2},\n pages={167--195},\n year={2015},\n publisher={IOS Press}\n}\n", "homepage": "https://wiki.dbpedia.org/develop/datasets", "license": "Creative Commons Attribution-ShareAlike 3.0 and the GNU Free Documentation License", "features": {"label": {"num_classes": 14, "names": ["Company", "EducationalInstitution", "Artist", "Athlete", "OfficeHolder", "MeanOfTransportation", "Building", "NaturalPlace", "Village", "Animal", "Plant", "Album", "Film", "WrittenWork"], "id": null, "_type": "ClassLabel"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "d_bpedia14", "config_name": "dbpedia_14", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 178428970, "num_examples": 560000, "dataset_name": "d_bpedia14"}, "test": {"name": "test", "num_bytes": 22310285, "num_examples": 70000, "dataset_name": "d_bpedia14"}}, "download_checksums": {"https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz": {"num_bytes": 68341743, "checksum": "42db5221ddedddb673a4cabcc5f3a7d869714c878bcfe4ba94b29d14aa38e417"}}, "download_size": 68341743, "post_processing_size": null, "dataset_size": 200739255, "size_in_bytes": 269080998}}
dbpedia_14.py CHANGED
@@ -16,7 +16,6 @@
 
 
 import csv
-import os
 
 import datasets
 
@@ -53,7 +52,7 @@ _HOMEPAGE = "https://wiki.dbpedia.org/develop/datasets"
 _LICENSE = "Creative Commons Attribution-ShareAlike 3.0 and the GNU Free Documentation License"
 
 _URLs = {
-    "dbpedia_14": "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbQ2Vic1kxMmZZQ1k",
+    "dbpedia_14": "https://s3.amazonaws.com/fast-ai-nlp/dbpedia_csv.tgz",
 }
 
 
@@ -117,29 +116,35 @@ class DBpedia14(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dbpedia_csv/train.csv"),
-                    "split": "train",
+                    "filepath": "dbpedia_csv/train.csv",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": os.path.join(data_dir, "dbpedia_csv/test.csv"), "split": "test"},
+                gen_kwargs={
+                    "filepath": "dbpedia_csv/test.csv",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
 
-        with open(filepath, encoding="utf-8") as f:
-            data = csv.reader(f, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
-            for id_, row in enumerate(data):
-                yield id_, {
-                    "title": row[1],
-                    "content": row[2],
-                    "label": int(row[0]) - 1,
-                }
+        for path, f in files:
+            if path == filepath:
+                lines = (line.decode("utf-8") for line in f)
+                data = csv.reader(lines, delimiter=",", quoting=csv.QUOTE_NONNUMERIC)
+                for id_, row in enumerate(data):
+                    yield id_, {
+                        "title": row[1],
+                        "content": row[2],
+                        "label": int(row[0]) - 1,
+                    }
+                break
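
With the loader now reading the CSVs directly out of the downloaded archive via iter_archive, no extraction step is needed, which also enables streaming. A minimal usage sketch using the standard datasets API (not part of this commit):

from datasets import load_dataset

# Regular mode: the .tgz is downloaded once and examples are read
# straight out of the archive; nothing is extracted to disk.
ds = load_dataset("dbpedia_14", split="train")
print(ds[0])  # {'label': ..., 'title': ..., 'content': ...}

# Streaming mode works too, since iter_archive yields (path, file)
# pairs while the archive is being read.
stream = load_dataset("dbpedia_14", split="test", streaming=True)
print(next(iter(stream)))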
dummy/dbpedia_14/2.0.0/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98d4ad02edaedc492459d94204bb149b82a55c2eb5bed0ad77b76ccbe387cd97
-size 3319
+oid sha256:b7c50b7d5318e6d689f5c7740acf147234955f1beb9ef4c9e06696ea9f74c8e6
+size 4355