Padomin committed on
Commit
14ccf7d
1 Parent(s): 11c9e43

Create ami-ihm-asr.py

Browse files
Files changed (1) hide show
  1. ami-ihm-asr.py +159 -0
ami-ihm-asr.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
 + """AMI-IHM ASR error-correction dataset script."""
16
+
17
+ import json
18
+ import os
19
+ from copy import deepcopy
20
+ import re
21
+ import unicodedata
22
+ from more_itertools import windowed
23
+ import datasets
24
+
25
# Citation for the source corpus (left empty by the original author).
_CITATION = """\
"""

# Dataset description (Japanese): "an error-correction dataset built from
# ASR transcriptions of AMI-IHM".
_DESCRIPTION = """\
ami-ihmを音声認識した誤り訂正用データセット
"""
_HOMEPAGE = ""
_LICENSE = ""

# Download locations for the archived jsonl files, keyed by config name.
# NOTE(review): only "ctc-large" has an entry, but the builder below also
# declares "v1", "v2" and "xlsr" configs — selecting those fails at download
# time because their URLs are missing here.
URLS = {
    "ctc-large": {
        "text": "https://huggingface.co/datasets/Padomin/ami-ihm-asr/resolve/main/ami-ihm-ctc-large.tar.gz",
    },
}
39
+
40
+
41
class ami_ihm_asr_config(datasets.BuilderConfig):
    """BuilderConfig for the AMI-IHM ASR error-correction dataset.

    Examples are built from a sliding window over the utterances of one
    document; these options control the window layout and the text markers
    inserted into the ``src`` field.

    Args:
        n_fronts: number of preceding context utterances included in ``src``.
        n_bodies: number of utterances forming the body of each example.
        n_rears: number of following context utterances included in ``src``.
        front_prefix: marker emitted before the front context in ``src``.
        body_prefix: marker emitted before the body in ``src``.
        rear_prefix: marker emitted before the rear context in ``src``.
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, n_fronts=0, n_bodies=1, n_rears=0,
                 front_prefix='front:\n', body_prefix='body:\n',
                 rear_prefix='rear:\n', **kwargs):
        # Python 3 zero-argument super() instead of the legacy
        # super(ami_ihm_asr_config, self) spelling — identical behavior.
        super().__init__(**kwargs)
        self.n_fronts = n_fronts
        self.n_bodies = n_bodies
        self.n_rears = n_rears
        self.front_prefix = front_prefix
        self.body_prefix = body_prefix
        self.rear_prefix = rear_prefix
50
+
51
class ami_ihm_asr(datasets.GeneratorBasedBuilder):
    """Error-correction dataset built from ASR transcripts of AMI-IHM.

    Each example pairs a window of ASR hypotheses (``src``, optionally with
    front/rear context delimited by the config prefixes) with the matching
    reference transcripts (``tgt``).
    """

    VERSION = datasets.Version("0.2.0")
    BUILDER_CONFIGS = [
        ami_ihm_asr_config(name="v1", version=VERSION),
        ami_ihm_asr_config(name="v2", version=VERSION),
        ami_ihm_asr_config(name="ctc-large", version=VERSION),
        ami_ihm_asr_config(name="xlsr", version=VERSION),
    ]
    # Only "ctc-large" currently has a download URL registered in URLS.
    DEFAULT_CONFIG_NAME = "ctc-large"
    BUILDER_CONFIG_CLASS = ami_ihm_asr_config

    def _info(self):
        """Return the dataset metadata (feature schema, description, license)."""
        feature_dict = {
            "text": datasets.Value("string"),
            "text_asr": datasets.Value("string"),
            "src": datasets.Value("string"),
            "tgt": datasets.Value("string"),
            "id": datasets.Value("string"),
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feature_dict),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive for the active config and declare the splits.

        Raises:
            ValueError: if no download URL is registered for the config.
                (The original code either raised a bare KeyError for the
                declared-but-URL-less configs v1/v2/xlsr, or hit an
                UnboundLocalError for any other name.)
        """
        # Exact dict lookup replaces the original chain of substring tests
        # ('"v1" in self.config.name', ...) — equivalent for every declared
        # config name, and it fails with an explicit message otherwise.
        if self.config.name not in URLS:
            raise ValueError(
                f"No download URL registered for config {self.config.name!r}; "
                f"available: {sorted(URLS)}"
            )
        urls = deepcopy(URLS[self.config.name])

        dl_path = dl_manager.download_and_extract(urls)

        # One generator per split; each split reads "<split>.jsonl" from the
        # extracted "text" archive.
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(dl_path["text"], f"{split}.jsonl"),
                    "split": split,
                },
            )
            for split_name, split in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.TEST, "test"),
                (datasets.Split.VALIDATION, "validation"),
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs windowed over each document's utterances.

        Args:
            filepath: path to a jsonl file; each line is a document with an
                "id" and a list of "utterances" carrying "text" and "asr".
            split: split name; training uses overlapping windows (step 1),
                other splits tile the utterances (step == n_bodies).
        """
        # Hoist loop-invariant config lookups out of the per-line loop.
        n_front = self.config.n_fronts
        n_body = self.config.n_bodies
        n_rear = self.config.n_rears
        window = n_front + n_body + n_rear

        id_ = 0
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                doc = json.loads(line)
                utterances = doc['utterances']
                # Separate the reference text and the ASR hypothesis streams.
                texts_asr = [utt['asr'] for utt in utterances]
                texts = [utt['text'] for utt in utterances]
                # Pad the ASR stream so the first/last bodies still have
                # (empty) front/rear context.
                padded_asr = [''] * n_front + texts_asr + [''] * n_rear
                if split == "train":
                    # Overlapping windows: slide one utterance at a time.
                    windowed_texts_asr = windowed(padded_asr, window)
                    windowed_texts = windowed(texts, n_body)
                else:
                    # Tiling windows: each utterance appears in one body.
                    windowed_texts_asr = windowed(padded_asr, window, fillvalue='', step=n_body)
                    windowed_texts = windowed(texts, n_body, fillvalue='', step=n_body)

                for text_asr, text, utt in zip(windowed_texts_asr, windowed_texts, utterances):
                    # Assemble src as: [front: ...] body: ... [rear: ...]
                    src = ''
                    if n_front > 0:
                        src += self.config.front_prefix
                        src += '\n'.join(text_asr[:n_front])
                        src += '\n'
                    src += self.config.body_prefix
                    src += '\n'.join(text_asr[n_front:n_front + n_body])
                    if n_rear > 0:
                        src += '\n' + self.config.rear_prefix
                        src += '\n'.join(text_asr[n_front + n_body:])
                    tgt = '\n'.join(text)

                    yield id_, {
                        "text": utt["text"],
                        "text_asr": utt["asr"],
                        'src': src,
                        'tgt': tgt,
                        # NOTE(review): every example in a document shares the
                        # document id; the yield key id_ is the unique one.
                        'id': doc["id"],
                    }
                    id_ += 1