Add support for streaming #1
by mariosasko - opened
Files changed (2)
  1. README.md +212 -1
  2. ted_talks_iwslt.py +130 -92
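
This PR makes the dataset streamable: with streaming, none of the ~1.6 GB `XML_releases.tgz` archive has to be downloaded and extracted before iterating. A minimal usage sketch (not part of this diff; config names follow the `{source}_{target}_{year}` pattern listed in the README metadata below):

from datasets import load_dataset

# Stream the Dutch-English 2014 config: examples are yielded lazily
# from the remote archive instead of a local extracted copy.
dataset = load_dataset("ted_talks_iwslt", "nl_en_2014", streaming=True)
for example in dataset["train"]:
    print(example["translation"])  # e.g. {"nl": "...", "en": "..."}
    break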
README.md CHANGED
@@ -145,6 +145,217 @@ configs:
 - nl_hi_2014
 - nl_hi_2015
 - nl_hi_2016
+dataset_info:
+- config_name: eu_ca_2014
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - eu
+        - ca
+  splits:
+  - name: train
+    num_bytes: 15192
+    num_examples: 44
+  download_size: 1666674366
+  dataset_size: 15192
+- config_name: eu_ca_2015
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - eu
+        - ca
+  splits:
+  - name: train
+    num_bytes: 18768
+    num_examples: 52
+  download_size: 1666674366
+  dataset_size: 18768
+- config_name: eu_ca_2016
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - eu
+        - ca
+  splits:
+  - name: train
+    num_bytes: 19506
+    num_examples: 54
+  download_size: 1666674366
+  dataset_size: 19506
+- config_name: nl_en_2014
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - en
+  splits:
+  - name: train
+    num_bytes: 1035545
+    num_examples: 2966
+  download_size: 1666674366
+  dataset_size: 1035545
+- config_name: nl_en_2015
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - en
+  splits:
+  - name: train
+    num_bytes: 1292610
+    num_examples: 3550
+  download_size: 1666674366
+  dataset_size: 1292610
+- config_name: nl_en_2016
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - en
+  splits:
+  - name: train
+    num_bytes: 1434207
+    num_examples: 3852
+  download_size: 1666674366
+  dataset_size: 1434207
+- config_name: nl_hi_2014
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - hi
+  splits:
+  - name: train
+    num_bytes: 214870
+    num_examples: 367
+  download_size: 1666674366
+  dataset_size: 214870
+- config_name: nl_hi_2015
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - hi
+  splits:
+  - name: train
+    num_bytes: 252192
+    num_examples: 421
+  download_size: 1666674366
+  dataset_size: 252192
+- config_name: nl_hi_2016
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - nl
+        - hi
+  splits:
+  - name: train
+    num_bytes: 310922
+    num_examples: 496
+  download_size: 1666674366
+  dataset_size: 310922
+- config_name: de_ja_2014
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - de
+        - ja
+  splits:
+  - name: train
+    num_bytes: 1074403
+    num_examples: 2536
+  download_size: 1666674366
+  dataset_size: 1074403
+- config_name: de_ja_2015
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - de
+        - ja
+  splits:
+  - name: train
+    num_bytes: 1442047
+    num_examples: 3247
+  download_size: 1666674366
+  dataset_size: 1442047
+- config_name: de_ja_2016
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - de
+        - ja
+  splits:
+  - name: train
+    num_bytes: 1630729
+    num_examples: 3590
+  download_size: 1666674366
+  dataset_size: 1630729
+- config_name: fr-ca_hi_2014
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - fr-ca
+        - hi
+  splits:
+  - name: train
+    num_bytes: 74472
+    num_examples: 127
+  download_size: 1666674366
+  dataset_size: 74472
+- config_name: fr-ca_hi_2015
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - fr-ca
+        - hi
+  splits:
+  - name: train
+    num_bytes: 82448
+    num_examples: 141
+  download_size: 1666674366
+  dataset_size: 82448
+- config_name: fr-ca_hi_2016
+  features:
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - fr-ca
+        - hi
+  splits:
+  - name: train
+    num_bytes: 93425
+    num_examples: 156
+  download_size: 1666674366
+  dataset_size: 93425
 ---
 
 # Dataset Card for Web Inventory of Transcribed & Translated(WIT) Ted Talks
@@ -470,4 +681,4 @@ cc-by-nc-nd-4.0
 
 ### Contributions
 
-Thanks to [@skyprince999](https://github.com/skyprince999) for adding this dataset.
+Thanks to [@skyprince999](https://github.com/skyprince999) for adding this dataset.
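
The `dataset_info` metadata added above lets tooling report split sizes and features without downloading any data. A small sketch of reading it (assumes a `datasets` version whose `get_dataset_infos` resolves this YAML metadata):

from datasets import get_dataset_infos

# Per-config metadata, resolved without downloading the archive.
infos = get_dataset_infos("ted_talks_iwslt")
print(infos["eu_ca_2014"].splits["train"].num_examples)  # 44, per the YAML above
print(infos["eu_ca_2014"].download_size)  # 1666674366 bytes, the full archive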
ted_talks_iwslt.py CHANGED
@@ -15,14 +15,13 @@
 """TED TALKS IWSLT: Web Inventory of Transcribed and Translated Ted Talks in 109 languages."""
 
 
-import os
+import io
 import xml.etree.ElementTree as ET
 import zipfile
 from collections import defaultdict
 
 import datasets
 
-
 logger = datasets.logging.get_logger(__name__)
 
 
@@ -63,7 +62,9 @@ _LICENSE = "CC-BY-NC-4.0"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URL = "https://drive.google.com/u/0/uc?id=1Cz1Un9p8Xn9IpEMMrg2kXSDt0dnjxc4z&export=download"
+_URL = (
+    "https://huggingface.co/datasets/ted_talks_iwslt/resolve/main/data/XML_releases.tgz"
+)
 
 _LANGUAGES = (
     "mr",
@@ -189,7 +190,11 @@ _LANGUAGE_PAIRS = [
 # Year subscripts for the specific folder
 _YEAR = {"2014": "-20140120", "2015": "-20150530", "2016": "-20160408"}
 
-_YEAR_FOLDER = {"2014": "XML_releases/xml-20140120", "2015": "XML_releases/xml-20150616", "2016": "XML_releases/xml"}
+_YEAR_FOLDER = {
+    "2014": "XML_releases/xml-20140120",
+    "2015": "XML_releases/xml-20150616",
+    "2016": "XML_releases/xml",
+}
 
 
 class TedTalksIWSLTConfig(datasets.BuilderConfig):
@@ -209,10 +214,14 @@ class TedTalksIWSLTConfig(datasets.BuilderConfig):
         source, target = language_pair
         assert source in _LANGUAGES, f"Invalid source code in language pair: {source}"
         assert target in _LANGUAGES, f"Invalid target code in language pair: {target}"
-        assert source != target, f"Source::{source} and Target::{target} language pairs cannot be the same!"
+        assert (
+            source != target
+        ), f"Source::{source} and Target::{target} language pairs cannot be the same!"
         assert year in _YEAR.keys()
 
-        description = f"Translation Ted Talks dataset (WIT3) between {source} and {target}"
+        description = (
+            f"Translation Ted Talks dataset (WIT3) between {source} and {target}"
+        )
         super(TedTalksIWSLTConfig, self).__init__(
             name=name,
             description=description,
@@ -232,7 +241,9 @@ class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIG_CLASS = TedTalksIWSLTConfig
 
     BUILDER_CONFIGS = [
-        TedTalksIWSLTConfig(language_pair=language_pair, year=year, version=datasets.Version("1.1.0"))
+        TedTalksIWSLTConfig(
+            language_pair=language_pair, year=year, version=datasets.Version("1.1.0")
+        )
         for language_pair in _LANGUAGE_PAIRS
         for year in _YEAR.keys()
     ]
@@ -240,7 +251,9 @@ class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = datasets.Features(
             {
-                "translation": datasets.features.Translation(languages=self.config.language_pair),
+                "translation": datasets.features.Translation(
+                    languages=self.config.language_pair
+                ),
             },
         )
 
@@ -263,88 +276,79 @@ class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        my_urls = _URL
-        language_pair = self.config.language_pair
-        year = self.config.year
-
-        data_dir = dl_manager.download_and_extract(my_urls)
-
-        zip_file_pair0 = os.path.join(data_dir, _YEAR_FOLDER[year] + "/ted_" + language_pair[0] + _YEAR[year] + ".zip")
-        zip_file_pair1 = os.path.join(data_dir, _YEAR_FOLDER[year] + "/ted_" + language_pair[1] + _YEAR[year] + ".zip")
+        data_dir = dl_manager.download(_URL)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": [zip_file_pair0, zip_file_pair1],
-                    "split": "train",
+                    "files": dl_manager.iter_archive(data_dir),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, files):
         """Yields examples."""
-        # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
-        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
-        # The key is not important, it's more here for legacy reason (legacy from tfds)
-        source = filepath[0]
-        target = filepath[1]
-
-        language_pair = self.config.name.split("_")
-
-        def et_to_dict(tree):
-            """This is used to convert the xml to a list of dicts"""
-
-            dct = {tree.tag: {} if tree.attrib else None}
-            children = list(tree)
-            if children:
-                dd = defaultdict(list)
-                for dc in map(et_to_dict, children):
-                    for k, v in dc.items():
-                        dd[k].append(v)
-                dct = {tree.tag: dd}
-            if tree.attrib:
-                dct[tree.tag].update((k, v) for k, v in tree.attrib.items())
-            if tree.text:
-                text = tree.text.strip()
-                if children or tree.attrib:
-                    if text:
-                        dct[tree.tag]["text"] = text
-                else:
-                    dct[tree.tag] = text
-            return dct
-
-        if os.path.exists(source) and os.path.exists(target):
-
-            with zipfile.ZipFile(source) as zf_source:
-                with zipfile.ZipFile(target) as zf_target:
-                    try:
-                        tree_source = ET.parse(zf_source.open(source.split("/")[-1][:-3] + "xml"))
-                        root_source = tree_source.getroot()
-                        source_talks = et_to_dict(root_source).get("xml").get("file")
-
-                        tree_target = ET.parse(zf_target.open(target.split("/")[-1][:-3] + "xml"))
-                        root_target = tree_target.getroot()
-                        target_talks = et_to_dict(root_target).get("xml").get("file")
-
-                        source_ids = [talk.get("head")[0].get("talkid") for talk in source_talks]
-                        target_ids = [talk.get("head")[0].get("talkid") for talk in target_talks]
-                    except Exception as pe:
-                        logger.warning(f"ERROR: {pe}")
-                        logger.warning(
-                            f"This likely means that you have a malformed XML file!\nEither {source} or {target}\n"
-                        )
-                        source_ids = list()
-                        target_ids = list()
-        else:
-            logger.warning(f"File doesn't exist {source} or {target}")
+
+        def parse_zip_file(path, file):
+            def et_to_dict(tree):
+                """This is used to convert the xml to a list of dicts"""
+
+                dct = {tree.tag: {} if tree.attrib else None}
+                children = list(tree)
+                if children:
+                    dd = defaultdict(list)
+                    for dc in map(et_to_dict, children):
+                        for k, v in dc.items():
+                            dd[k].append(v)
+                    dct = {tree.tag: dd}
+                if tree.attrib:
+                    dct[tree.tag].update((k, v) for k, v in tree.attrib.items())
+                if tree.text:
+                    text = tree.text.strip()
+                    if children or tree.attrib:
+                        if text:
+                            dct[tree.tag]["text"] = text
+                    else:
+                        dct[tree.tag] = text
+                return dct
+
+            with zipfile.ZipFile(io.BytesIO(file)) as zf:
+                try:
+                    tree = ET.parse(zf.open(path.split("/")[-1][:-3] + "xml"))
+                    root = tree.getroot()
+                    talks = et_to_dict(root).get("xml").get("file")
+                    ids = [talk.get("head")[0].get("talkid") for talk in talks]
+                except Exception as pe:
+                    logger.warning(f"ERROR: {pe}")
+                    logger.warning(
+                        "This likely means that you have a malformed XML file!"
+                    )
+                    ids = []
+            return talks, ids
+
+        language_pair = self.config.language_pair
+        year = self.config.year
+
+        source_file_path = (
+            _YEAR_FOLDER[year] + "/ted_" + language_pair[0] + _YEAR[year] + ".zip"
+        )
+        target_file_path = (
+            _YEAR_FOLDER[year] + "/ted_" + language_pair[1] + _YEAR[year] + ".zip"
+        )
+
+        source_talks, source_ids = None, None
+        target_talks, target_ids = None, None
+        for path, file in files:
+            if source_ids is not None and target_ids is not None:
+                break
+
+            if source_ids is None and path.endswith(source_file_path):
+                source_talks, source_ids = parse_zip_file(path, file.read())
+            elif target_ids is None and path.endswith(target_file_path):
+                target_talks, target_ids = parse_zip_file(path, file.read())
+
+        if source_ids is None or target_ids is None:
             source_ids = list()
             target_ids = list()
 
@@ -353,8 +357,18 @@ class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
         translation = list()
 
         for talkid in comm_talkids:
-            source = list(filter(lambda talk: talk.get("head")[0].get("talkid") == talkid, source_talks))
-            target = list(filter(lambda talk: talk.get("head")[0].get("talkid") == talkid, target_talks))
+            source = list(
+                filter(
+                    lambda talk: talk.get("head")[0].get("talkid") == talkid,
+                    source_talks,
+                )
+            )
+            target = list(
+                filter(
+                    lambda talk: talk.get("head")[0].get("talkid") == talkid,
+                    target_talks,
+                )
+            )
 
             if len(source) == 0 or len(target) == 0:
                 pass
@@ -362,33 +376,57 @@ class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
             source = source[0]
             target = target[0]
 
-            if source.get("head")[0].get("description") and target.get("head")[0].get("description"):
-                if source.get("head")[0].get("description")[0] and target.get("head")[0].get("description")[0]:
+            if source.get("head")[0].get("description") and target.get("head")[0].get(
+                "description"
+            ):
+                if (
+                    source.get("head")[0].get("description")[0]
+                    and target.get("head")[0].get("description")[0]
+                ):
                     temp_dict = dict()
                     temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_1"
                     temp_dict[language_pair[0]] = (
-                        source.get("head")[0].get("description")[0].replace("TED Talk Subtitles and Transcript: ", "")
+                        source.get("head")[0]
+                        .get("description")[0]
+                        .replace("TED Talk Subtitles and Transcript: ", "")
                     )
                     temp_dict[language_pair[1]] = (
-                        target.get("head")[0].get("description")[0].replace("TED Talk Subtitles and Transcript: ", "")
+                        target.get("head")[0]
+                        .get("description")[0]
+                        .replace("TED Talk Subtitles and Transcript: ", "")
                     )
                     translation.append(temp_dict)
 
-            if source.get("head")[0].get("title") and target.get("head")[0].get("title"):
-                if source.get("head")[0].get("title")[0] and target.get("head")[0].get("title")[0]:
+            if source.get("head")[0].get("title") and target.get("head")[0].get(
+                "title"
+            ):
+                if (
+                    source.get("head")[0].get("title")[0]
+                    and target.get("head")[0].get("title")[0]
+                ):
                     temp_dict = dict()
                     temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_2"
                     temp_dict[language_pair[0]] = source.get("head")[0].get("title")[0]
                     temp_dict[language_pair[1]] = target.get("head")[0].get("title")[0]
                     translation.append(temp_dict)
 
-            if source.get("head")[0].get("seekvideo") and target.get("head")[0].get("seekvideo"):
-                source_transc = source.get("head")[0].get("transcription")[0].get("seekvideo")
-                target_transc = target.get("head")[0].get("transcription")[0].get("seekvideo")
+            if source.get("head")[0].get("seekvideo") and target.get("head")[0].get(
+                "seekvideo"
+            ):
+                source_transc = (
+                    source.get("head")[0].get("transcription")[0].get("seekvideo")
+                )
+                target_transc = (
+                    target.get("head")[0].get("transcription")[0].get("seekvideo")
+                )
 
                 transc = zip(source_transc, target_transc)
                 transcriptions = [
-                    {"id": s.get("id"), language_pair[0]: s.get("text"), language_pair[1]: t.get("text")}
+                    {
+                        "id": s.get("id"),
+                        language_pair[0]: s.get("text"),
+                        language_pair[1]: t.get("text"),
+                    }
                     for s, t in transc
                 ]
                 translation.extend(transcriptions)
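
A design note on the new `_generate_examples`: `dl_manager.iter_archive` yields `(path, file)` pairs in which `file` is a sequential, non-seekable stream, while `zipfile.ZipFile` needs random access. That is why each nested `.zip` member is read fully into memory and wrapped in `io.BytesIO`. A stand-alone sketch of the pattern (illustrative names, not part of the diff):

import io
import zipfile

def open_nested_zip_xml(member_path, member_bytes):
    """Open the XML inside a .zip that was read out of an outer archive stream."""
    # member_bytes comes from file.read() on a (path, file) pair yielded by
    # dl_manager.iter_archive; BytesIO makes the bytes seekable for ZipFile.
    zf = zipfile.ZipFile(io.BytesIO(member_bytes))
    # The XML shares the member's base name, e.g. "ted_nl-20140120.zip"
    # contains "ted_nl-20140120.xml".
    xml_name = member_path.split("/")[-1][:-3] + "xml"
    return zf.open(xml_name)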