"""Loading script for the iulik-pisik/horoscop_neti speech dataset."""

import csv
import os

from datasets import (
    Audio, DatasetInfo, Features, GeneratorBasedBuilder,
    Split, SplitGenerator, Value,
)
from tqdm import tqdm


_BASE_URL = "https://huggingface.co/datasets/iulik-pisik/horoscop_neti/resolve/main/"
_AUDIO_URL = _BASE_URL + "audio/{split}.tar"
_TRANSCRIPT_URL = _BASE_URL + "transcript/{split}.tsv"

class HoroscopNeti(GeneratorBasedBuilder):
    """Builder that pairs the audio archives with their TSV transcripts."""

    def _info(self):
        return DatasetInfo(
            description="Audio clips with their corresponding text transcripts.",
            features=Features({
                "path": Value("string"),
                "audio": Audio(sampling_rate=16000),
                "transcript": Value("string"),
            }),
            supervised_keys=("audio", "transcript"),
            homepage="https://huggingface.co/datasets/iulik-pisik/horoscop_neti",
            citation="Referința de citare a datasetului",
        )

    def _split_generators(self, dl_manager):
        # Remote locations of the per-split audio archives and transcript TSVs.
        audio_urls = {
            "train_audio": _AUDIO_URL.format(split="train"),
            "test_audio": _AUDIO_URL.format(split="test"),
            "validation_audio": _AUDIO_URL.format(split="validation"),
        }
        tsv_urls = {
            "train_tsv": _TRANSCRIPT_URL.format(split="train"),
            "test_tsv": _TRANSCRIPT_URL.format(split="test"),
            "validation_tsv": _TRANSCRIPT_URL.format(split="validation"),
        }
    
        # Audio archives are downloaded and extracted; transcript TSVs are only downloaded.
        downloaded_audio_files = dl_manager.download_and_extract(audio_urls)
        downloaded_tsv_files = dl_manager.download(tsv_urls)
    
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "archive_path": downloaded_audio_files["train_audio"],
                    "tsv_path": downloaded_tsv_files["train_tsv"],
                },
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={
                    "archive_path": downloaded_audio_files["test_audio"],
                    "tsv_path": downloaded_tsv_files["test_tsv"],
                },
            ),
            SplitGenerator(
                name=Split.VALIDATION,
                gen_kwargs={
                    "archive_path": downloaded_audio_files["validation_audio"],
                    "tsv_path": downloaded_tsv_files["validation_tsv"],
                },
            ),
        ]


    def _generate_examples(self, archive_path, tsv_path):
        # Each TSV row maps an audio file name ("path") to its transcript ("sentence").
        with open(tsv_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading data..."):
                audio_file_name = row["path"]
                audio_path = os.path.join(archive_path, audio_file_name)

                # Skip rows whose audio file is missing from the extracted archive.
                if not os.path.isfile(audio_path):
                    continue

                yield audio_file_name, {
                    "path": audio_path,
                    "audio": audio_path,
                    "transcript": row["sentence"],
                }