system HF staff committed on
Commit
d16ca67
1 Parent(s): ee83ca8

Update files from the datasets library (from 1.10.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.10.0

Files changed (3) hide show
  1. README.md +4 -4
  2. dataset_infos.json +1 -1
  3. lj_speech.py +2 -0
README.md CHANGED
@@ -9,16 +9,16 @@ licenses:
9
  - other-public-domain
10
  multilinguality:
11
  - monolingual
 
 
12
  size_categories:
13
  - 10K<n<100K
14
  source_datasets:
15
  - original
16
  task_categories:
17
- - other
18
  task_ids:
19
- - other-other-automatic-speech-recognition
20
- - other-other-text-to-speech
21
- paperswithcode_id: ljspeech
22
  ---
23
 
24
  # Dataset Card for lj_speech
 
9
  - other-public-domain
10
  multilinguality:
11
  - monolingual
12
+ paperswithcode_id: ljspeech
13
+ pretty_name: LJ Speech
14
  size_categories:
15
  - 10K<n<100K
16
  source_datasets:
17
  - original
18
  task_categories:
19
+ - speech-processing
20
  task_ids:
21
+ - automatic-speech-recognition
 
 
22
  ---
23
 
24
  # Dataset Card for lj_speech
dataset_infos.json CHANGED
@@ -1 +1 @@
1
- {"main": {"description": "This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker reading \npassages from 7 non-fiction books in English. A transcription is provided for each clip. Clips vary in length \nfrom 1 to 10 seconds and have a total length of approximately 24 hours.\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .wav format and is not converted to a float32 array. To convert the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@misc{ljspeech17,\n author = {Keith Ito and Linda Johnson},\n title = {The LJ Speech Dataset},\n howpublished = {\\url{https://keithito.com/LJ-Speech-Dataset/}},\n year = 2017\n}\n", "homepage": "https://keithito.com/LJ-Speech-Dataset/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "file": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "builder_name": "lj_speech", "config_name": "main", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4667022, "num_examples": 13100, "dataset_name": "lj_speech"}}, "download_checksums": {"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2": {"num_bytes": 2748572632, "checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5"}}, "download_size": 2748572632, "post_processing_size": null, "dataset_size": 4667022, "size_in_bytes": 2753239654}}
 
1
+ {"main": {"description": "This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker reading \npassages from 7 non-fiction books in English. A transcription is provided for each clip. Clips vary in length \nfrom 1 to 10 seconds and have a total length of approximately 24 hours.\n\nNote that in order to limit the required storage for preparing this dataset, the audio\nis stored in the .wav format and is not converted to a float32 array. To convert the audio\nfile to a float32 array, please make use of the `.map()` function as follows:\n\n\n```python\nimport soundfile as sf\n\ndef map_to_array(batch):\n speech_array, _ = sf.read(batch[\"file\"])\n batch[\"speech\"] = speech_array\n return batch\n\ndataset = dataset.map(map_to_array, remove_columns=[\"file\"])\n```\n", "citation": "@misc{ljspeech17,\n author = {Keith Ito and Linda Johnson},\n title = {The LJ Speech Dataset},\n howpublished = {\\url{https://keithito.com/LJ-Speech-Dataset/}},\n year = 2017\n}\n", "homepage": "https://keithito.com/LJ-Speech-Dataset/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "file": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "normalized_text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "file", "output": "text"}, "task_templates": [{"task": "automatic-speech-recognition", "audio_file_path_column": "file", "transcription_column": "text"}], "builder_name": "lj_speech", "config_name": "main", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4667022, "num_examples": 13100, "dataset_name": "lj_speech"}}, "download_checksums": {"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2": {"num_bytes": 2748572632, "checksum": 
"be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5"}}, "download_size": 2748572632, "post_processing_size": null, "dataset_size": 4667022, "size_in_bytes": 2753239654}}
lj_speech.py CHANGED
@@ -21,6 +21,7 @@ import csv
21
  import os
22
 
23
  import datasets
 
24
 
25
 
26
  _CITATION = """\
@@ -81,6 +82,7 @@ class LJSpeech(datasets.GeneratorBasedBuilder):
81
  supervised_keys=("file", "text"),
82
  homepage=_URL,
83
  citation=_CITATION,
 
84
  )
85
 
86
  def _split_generators(self, dl_manager):
 
21
  import os
22
 
23
  import datasets
24
+ from datasets.tasks import AutomaticSpeechRecognition
25
 
26
 
27
  _CITATION = """\
 
82
  supervised_keys=("file", "text"),
83
  homepage=_URL,
84
  citation=_CITATION,
85
+ task_templates=[AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="text")],
86
  )
87
 
88
  def _split_generators(self, dl_manager):