EdwardHayashi-2023 committed on
Commit
28d14c9
·
1 Parent(s): 9b93499

Create MELD-Audio.py

Files changed (1)
  1. MELD-Audio.py +148 -0
MELD-Audio.py ADDED
@@ -0,0 +1,148 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Fri Apr 21 12:49:21 2023
+
+ @author: lin.kinwahedward
+ """
+ #------------------------------------------------------------------------------
+ # Standard Libraries
+ import datasets
+ import csv
+ #------------------------------------------------------------------------------
+ """MELD-Audio: the Multimodal EmotionLines Dataset (MELD), audio modality only."""
+
+ _CITATION = """\
+ @article{poria2018meld,
+   title={MELD: A multimodal multi-party dataset for emotion recognition in conversations},
+   author={Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada},
+   journal={arXiv preprint arXiv:1810.02508},
+   year={2018}
+ }
+ @article{chen2018emotionlines,
+   title={EmotionLines: An emotion corpus of multi-party conversations},
+   author={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},
+   journal={arXiv preprint arXiv:1802.08379},
+   year={2018}
+ }
+ """
+ _DESCRIPTION = """\
+ The Multimodal EmotionLines Dataset (MELD) was created by enhancing and extending the EmotionLines dataset.
+ MELD contains the same dialogue instances as EmotionLines, but it also encompasses the audio and visual
+ modalities along with text. MELD has more than 1400 dialogues and 13000 utterances from the Friends TV series.
+ Multiple speakers participated in the dialogues. Each utterance in a dialogue has been labeled with one of
+ seven emotions -- Anger, Disgust, Sadness, Joy, Neutral, Surprise, and Fear. MELD also has sentiment (positive,
+ negative, and neutral) annotations for each utterance.
+
+ This version is slightly modified so that it concentrates on emotion recognition from audio input only.
+ """
+
+ _HOMEPAGE = "https://affective-meld.github.io/"
+
+ _LICENSE = "CC BY 4.0"
+
+ _DATA_URL = "https://drive.google.com/uc?export=download&id=1TPr9v5Vz1qQuxPWcr8RedfuQvLyuG1lm"
+
+ #------------------------------------------------------------------------------
+ # Define the dataset configuration (e.g., for a subset of the dataset;
+ # only a single configuration is used here).
+ class DS_Config(datasets.BuilderConfig):
+     #--------------------------------------------------------------------------
+     def __init__(self, name, description, homepage, data_url):
+
+         # Forward the constructor arguments to the parent class.
+         super(DS_Config, self).__init__(
+             name = name,
+             version = datasets.Version("1.0.0"),
+             description = description,
+         )
+         self.name = name
+         self.description = description
+         self.homepage = homepage
+         self.data_url = data_url
+ #------------------------------------------------------------------------------
+ # Define Dataset Class
+ class MELD_Audio(datasets.GeneratorBasedBuilder):
+     #--------------------------------------------------------------------------
+     BUILDER_CONFIGS = [DS_Config(
+         name = "MELD_Audio",
+         description = _DESCRIPTION,
+         homepage = _HOMEPAGE,
+         data_url = _DATA_URL
+     )]
+     #--------------------------------------------------------------------------
+     '''
+     Define the "column headers" (features) of a datum.
+     3 features:
+         1) path to the audio file
+         2) audio samples
+         3) emotion label
+     '''
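+     # For illustration, a loaded example looks roughly like:
+     #     {"path": ".../train/dia0_utt0.mp3",
+     #      "audio": {"path": ..., "array": ..., "sampling_rate": 16000},
+     #      "label": 0}
+     # (the Audio feature decodes the file on access, and ClassLabel stores
+     # the emotion as an integer index; the exact layout depends on the
+     # datasets version).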
+     def _info(self):
+
+         features = datasets.Features(
+             {
+                 "path": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate = 16000),
+                 "label": datasets.ClassLabel(
+                     names = [
+                         "neutral",
+                         "joy",
+                         "sadness",
+                         "anger",
+                         "surprise",
+                         "fear",
+                         "disgust"
+                     ])
+             }
+         )
+
+         # Return the dataset info together with the feature schema.
+         return datasets.DatasetInfo(
+             description = _DESCRIPTION,
+             features = features,
+             homepage = _HOMEPAGE,
+             license = _LICENSE,
+             citation = _CITATION,
+         )
+     #--------------------------------------------------------------------------
+     def _split_generators(self, dl_manager):
+
+         # Download and extract the audio archive once; the same root path is
+         # shared by all three splits.
+         dataset_path = dl_manager.download_and_extract(self.config.data_url)
+
+         return [
+             datasets.SplitGenerator(
+                 name = datasets.Split.TRAIN,
+                 gen_kwargs = {"split_key": "train", "dataset_path": dataset_path},
+             ),
+             datasets.SplitGenerator(
+                 name = datasets.Split.VALIDATION,
+                 gen_kwargs = {"split_key": "dev", "dataset_path": dataset_path},
+             ),
+             datasets.SplitGenerator(
+                 name = datasets.Split.TEST,
+                 gen_kwargs = {"split_key": "test", "dataset_path": dataset_path},
+             )
+         ]
+     #--------------------------------------------------------------------------
+     def _generate_examples(self, split_key, dataset_path):
+         '''
+         Yield each audio file together with its corresponding label.
+         '''
+         key = 0
+         # The split CSVs (train.csv, dev.csv, test.csv) are expected in the
+         # current working directory, alongside this script.
+         text_path = "./" + split_key + ".csv"
+         with open(text_path, encoding = "utf-8") as csv_file:
+             csv_reader = csv.reader(csv_file, delimiter = ",", skipinitialspace = True)
+             # Skip the header row.
+             next(csv_reader)
+             for row in csv_reader:
+                 _, _, _, emotion, _, dialogue_id, utterance_id, _, _, _, _ = row
+                 filename = split_key + "/dia" + dialogue_id + "_utt" + utterance_id + ".mp3"
+                 yield key, {
+                     "path": dataset_path + "/" + filename,
+                     # The datasets library decodes the audio file (e.g., via
+                     # soundfile) when this field is accessed.
+                     "audio": dataset_path + "/" + filename,
+                     "label": emotion,
+                 }
+                 key += 1
+ #------------------------------------------------------------------------------
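For reference, a minimal usage sketch (the repo id is an assumption inferred from the committer and file names; newer versions of the datasets library may also require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Hypothetical repo id -- adjust to the actual dataset repository.
ds = load_dataset("EdwardHayashi-2023/MELD-Audio")

sample = ds["train"][0]
print(sample["path"])                    # path to the mp3 file
print(sample["audio"]["sampling_rate"])  # 16000, as declared in _info()
print(sample["label"])                   # integer ClassLabel index (0 = "neutral")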