nicolaus625 committed on
Commit
bb01eca
1 Parent(s): 356a812

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/mfcc_hires.conf +11 -0
  2. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/online_cmvn.conf +1 -0
  3. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/queue.conf +9 -0
  4. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/compare_wer.sh +131 -0
  5. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/run_chain_common.sh +82 -0
  6. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/run_tdnn_1d.sh +230 -0
  7. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/check_tools.sh +50 -0
  8. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/nnet3/run_ivector_common.sh +188 -0
  9. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/prepare_data.py +130 -0
  10. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/prepare_dict.sh +154 -0
  11. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/score.sh +3 -0
  12. DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/train_lms_srilm.sh +167 -0
  13. DSing/sing_300x30x2/AU/AUVocals/101935856_2505827-1179367744_1581430405-AU-M-411494616.wav +3 -0
  14. DSing/sing_300x30x2/AU/AUVocals/1127321777_3089957-1008354660_1549665636-AU-F-1327123746.wav +3 -0
  15. DSing/sing_300x30x2/AU/AUVocals/119790220_1667353-120933022_1544151714-AU-F-120928760.wav +3 -0
  16. DSing/sing_300x30x2/AU/AUVocals/119790220_263436-441829487_1587105890-AU-F-441829569.wav +3 -0
  17. DSing/sing_300x30x2/AU/AUVocals/17119916_667134-406514963_1571668143-AU-M-757521787.wav +3 -0
  18. DSing/sing_300x30x2/AU/AUVocals/3217450_3217450-1193001215_1411682977-AU-F-126106818.wav +3 -0
  19. DSing/sing_300x30x2/AU/AUVocals/3219205_3219205-32449093_1630217042-AU-F-1336713592.wav +3 -0
  20. DSing/sing_300x30x2/AU/AUVocals/331426834_128031-55256932_1613332221-AU-M-55260049.wav +3 -0
  21. DSing/sing_300x30x2/AU/AUVocals/3448294_3448294-1042608402_1487495573-AU-M-524731806.wav +3 -0
  22. DSing/sing_300x30x2/AU/AUVocals/3582632_3582632-425532491_1602008190-AU-F-947968160.wav +3 -0
  23. DSing/sing_300x30x2/AU/AUVocals/366321445_101397-1340932642_1567244952-AU-M-429038226.wav +3 -0
  24. DSing/sing_300x30x2/AU/AUVocals/3703714_232358-541252383_1457684489-AU-F-521014707.wav +3 -0
  25. DSing/sing_300x30x2/AU/AUVocals/3769302_3769302-1088349686_1621899122-AU-F-376535394.wav +3 -0
  26. DSing/sing_300x30x2/AU/AUVocals/3769415_3769415-550188297_1562374359-AU-M-180102765.wav +3 -0
  27. DSing/sing_300x30x2/AU/AUVocals/3769646_3769646-489322685_198913142-AU-F-210384260.wav +3 -0
  28. DSing/sing_300x30x2/AU/AUVocals/3769825_3769825-1221818620_1608937070-AU-F-1304504139.wav +3 -0
  29. DSing/sing_300x30x2/AU/AUVocals/3770436_3770436-32449093_1621119562-AU-F-1336713592.wav +3 -0
  30. DSing/sing_300x30x2/AU/AUVocals/3770715_3770715-1004365916_1631793249-AU-F-140937561.wav +3 -0
  31. DSing/sing_300x30x2/AU/AUVocals/3771478_3771478-538293740_1663834938-AU-M-180102765.wav +3 -0
  32. DSing/sing_300x30x2/AU/AUVocals/40228097_169333-863902410_1558169302-AU-F-238098736.wav +3 -0
  33. DSing/sing_300x30x2/AU/AUVocals/409146418_113102-1116943097_1689981903-AU-F-1116943324.wav +3 -0
  34. DSing/sing_300x30x2/AU/AUVocals/4385219_4385219-74242534_1661534803-AU-F-578766880.wav +3 -0
  35. DSing/sing_300x30x2/AU/AUVocals/474572_111629-707863177_1441046998-AU-F-707873017.wav +3 -0
  36. DSing/sing_300x30x2/AU/AUVocals/530817499_1833105-882003598_1682046239-AU-M-882000596.wav +3 -0
  37. DSing/sing_300x30x2/AU/AUVocals/53709742_99250-367325249_1522374367-AU-M-449511466.wav +3 -0
  38. DSing/sing_300x30x2/AU/AUVocals/587178900_1824409-1046397871_1525665397-AU-M-1009387124.wav +3 -0
  39. DSing/sing_300x30x2/AU/AUVocals/64422077_111775-133787857_1608718178-AU-M-425576199.wav +3 -0
  40. DSing/sing_300x30x2/AU/AUVocals/66512068_448309-424554687_1634821714-AU-M-424549845.wav +3 -0
  41. DSing/sing_300x30x2/AU/AUVocals/670462920_1440483-1046340502_1538129960-AU-M-1046341495.wav +3 -0
  42. DSing/sing_300x30x2/AU/AUVocals/671453965_1179860-124368911_1654337020-AU-M-1160483819.wav +3 -0
  43. DSing/sing_300x30x2/AU/AUVocals/687441455_1598516-1283348030_1653892220-AU-M-1257175175.wav +3 -0
  44. DSing/sing_300x30x2/AU/AUVocals/689806719_841385-77916125_1576405705-AU-M-77917061.wav +3 -0
  45. DSing/sing_300x30x2/AU/AUVocals/709155717_2053380-1315163597_1602466189-AU-F-749289766.wav +3 -0
  46. DSing/sing_300x30x2/AU/AUVocals/757082027_986792-874217558_1597789131-AU-M-800005312.wav +3 -0
  47. DSing/sing_300x30x2/AU/AUVocals/789788546_2193873-706068094_1543553510-AU-F-706058331.wav +3 -0
  48. DSing/sing_300x30x2/AU/AUVocals/90470934_2940068-417337448_1568372565-AU-F-417337457.wav +3 -0
  49. DSing/sing_300x30x2/AU/AUVocals/95450022_546114-534290805_1577875290-AU-M-892339901.wav +3 -0
  50. DSing/sing_300x30x2/IT/ITVocals/139427932_58803-626568036_1662347130-IT-M-626573007.wav +3 -0
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/mfcc_hires.conf ADDED
@@ -0,0 +1,11 @@
+ # config for high-resolution MFCC features, intended for neural network training
+ # Note: we keep all cepstra, so it has the same info as filterbank features,
+ # but MFCC is more easily compressible (because less correlated) which is why
+ # we prefer this method.
+ --use-energy=false # use average of log energy, not energy.
+ --num-mel-bins=40 # similar to Google's setup.
+ --num-ceps=40 # there is no dimensionality reduction.
+ --low-freq=20 # low cutoff frequency for mel bins... this is high-bandwidth data, so
+ # there might be some information at the low end.
+ --high-freq=7600 # high cutoff frequency, relative to Nyquist of 8000 (=7600)
+ --allow_downsample=true
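For orientation, this config is consumed by the recipe's feature-extraction stage (see local/nnet3/run_ivector_common.sh further down); a minimal sketch of that call, assuming a prepared data/train_sp_hires directory and a parallelization command in $train_cmd:

  # Sketch: extract 40-dim high-resolution MFCCs for a (hypothetical) hires data dir.
  steps/make_mfcc.sh --nj 8 --mfcc-config conf/mfcc_hires.conf \
    --cmd "$train_cmd" data/train_sp_hires
  steps/compute_cmvn_stats.sh data/train_sp_hires
  utils/fix_data_dir.sh data/train_sp_hires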
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/online_cmvn.conf ADDED
@@ -0,0 +1 @@
+ # configuration file for apply-cmvn-online, used in the script ../local/run_online_decoding.sh
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/conf/queue.conf ADDED
@@ -0,0 +1,9 @@
+ command qsub -v PATH -cwd -S /bin/bash -j y
+ option mem=* -l rmem=$0 -j y
+ option mem=0 # Do not add anything to qsub_opts
+ option num_threads=* -pe smp $0
+ option num_threads=1 # Do not add anything to qsub_opts
+ option max_jobs_run=* -tc $0
+ default gpu=0
+ option gpu=0
+ option gpu=* -l gpu=$0
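This file uses the option-mapping format read by Kaldi's queue.pl (here it maps --mem requests to the grid engine's rmem resource). A typical way to wire it in, assuming the recipe's cmd.sh follows the usual Kaldi pattern (cmd.sh itself is not part of this view):

  # Hypothetical cmd.sh entries pointing queue.pl at this config.
  export train_cmd="queue.pl --config conf/queue.conf --mem 2G"
  export decode_cmd="queue.pl --config conf/queue.conf --mem 4G"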
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/compare_wer.sh ADDED
@@ -0,0 +1,131 @@
+ #!/bin/bash
+
+ # this script is used for comparing decoding results between systems.
+ # e.g. local/chain/compare_wer.sh exp/chain/tdnn_{c,d}_sp
+ # For use with discriminatively trained systems you specify the epochs after a colon:
+ # for instance,
+ # local/chain/compare_wer.sh exp/chain/tdnn_c_sp exp/chain/tdnn_c_sp_smbr:{1,2,3}
+
+
+ if [ $# == 0 ]; then
+ echo "Usage: $0: [--looped] [--online] <dir1> [<dir2> ... ]"
+ echo "e.g.: $0 exp/chain/tdnn_{b,c}_sp"
+ echo "or (with epoch numbers for discriminative training):"
+ echo "$0 exp/chain/tdnn_b_sp_disc:{1,2,3}"
+ exit 1
+ fi
+
+ echo "# $0 $*"
+
+ include_looped=false
+ if [ "$1" == "--looped" ]; then
+ include_looped=true
+ shift
+ fi
+ include_online=false
+ if [ "$1" == "--online" ]; then
+ include_online=true
+ shift
+ fi
+
+
+ used_epochs=false
+
+ # this function set_names is used to separate the epoch-related parts of the name
+ # [for discriminative training] and the regular parts of the name.
+ # If called with a colon-free directory name, like:
+ # set_names exp/chain/tdnn_lstm1e_sp_bi_smbr
+ # it will set dir=exp/chain/tdnn_lstm1e_sp_bi_smbr and epoch_infix=""
+ # If called with something like:
+ # set_names exp/chain/tdnn_d_sp_smbr:3
+ # it will set dir=exp/chain/tdnn_d_sp_smbr and epoch_infix="_epoch3"
+
+
+ set_names() {
+ if [ $# != 1 ]; then
+ echo "compare_wer_general.sh: internal error"
+ exit 1 # exit the program
+ fi
+ dirname=$(echo $1 | cut -d: -f1)
+ epoch=$(echo $1 | cut -s -d: -f2)
+ if [ -z $epoch ]; then
+ epoch_infix=""
+ else
+ used_epochs=true
+ epoch_infix=_epoch${epoch}
+ fi
+ }
+
+
+
+ echo -n "# System "
+ for x in $*; do printf "% 10s" " $(basename $x)"; done
+ echo
+
+ strings=(
+ "#WER dev_clean_2 (tgsmall) "
+ "#WER dev_clean_2 (tglarge) ")
+
+ for n in 0 1; do
+ echo -n "${strings[$n]}"
+ for x in $*; do
+ set_names $x # sets $dirname and $epoch_infix
+ decode_names=(tgsmall_dev_clean_2 tglarge_dev_clean_2)
+
+ wer=$(cat $dirname/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}')
+ printf "% 10s" $wer
+ done
+ echo
+ if $include_looped; then
+ echo -n "# [looped:] "
+ for x in $*; do
+ set_names $x # sets $dirname and $epoch_infix
+ wer=$(cat $dirname/decode_looped_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}')
+ printf "% 10s" $wer
+ done
+ echo
+ fi
+ if $include_online; then
+ echo -n "# [online:] "
+ for x in $*; do
+ set_names $x # sets $dirname and $epoch_infix
+ wer=$(cat ${dirname}_online/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}')
+ printf "% 10s" $wer
+ done
+ echo
+ fi
+ done
+
+
+ if $used_epochs; then
+ exit 0; # the diagnostics aren't comparable between regular and discriminatively trained systems.
+ fi
+
+
+ echo -n "# Final train prob "
+ for x in $*; do
+ prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -v xent | awk '{printf("%.4f", $8)}')
+ printf "% 10s" $prob
+ done
+ echo
+
+ echo -n "# Final valid prob "
+ for x in $*; do
+ prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -v xent | awk '{printf("%.4f", $8)}')
+ printf "% 10s" $prob
+ done
+ echo
+
+ echo -n "# Final train prob (xent)"
+ for x in $*; do
+ prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -w xent | awk '{printf("%.4f", $8)}')
+ printf "% 10s" $prob
+ done
+ echo
+
+ echo -n "# Final valid prob (xent)"
+ for x in $*; do
+ prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -w xent | awk '{printf("%.4f", $8)}')
+ printf "% 10s" $prob
+ done
+ echo
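Note that the WER rows above are still wired to mini_librispeech-style decode directories (decode_tgsmall_dev_clean_2 / decode_tglarge_dev_clean_2); the DSing systems built by run_tdnn_1d.sh below decode into decode_<set>_3G and decode_<set>_4G, so those names would need adjusting before the script reports WER for this setup. A hypothetical invocation once that is done (system directories are illustrative):

  # Compare two chain systems side by side.
  local/chain/compare_wer.sh exp/chain_cleaned/tdnn_1d_sp exp/chain_cleaned/tdnn_1c_sp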
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/run_chain_common.sh ADDED
@@ -0,0 +1,82 @@
+ #!/bin/bash
+
+ # this script has common stages shared across librispeech chain recipes.
+ # It generates a new topology in a new lang directory, gets the alignments as
+ # lattices, and builds a tree for the new topology
+ set -e
+
+ stage=11
+
+ # input directory names. These options are actually compulsory, and they have
+ # been named for convenience
+ gmm_dir=
+ ali_dir=
+ lores_train_data_dir=
+
+ num_leaves=6000
+
+ # output directory names. They are also compulsory.
+ lang=
+ lat_dir=
+ tree_dir=
+ # End configuration section.
+ echo "$0 $@" # Print the command line for logging
+
+ . ./cmd.sh
+ . ./path.sh
+ . ./utils/parse_options.sh
+
+ [ -z $lang ] && echo "Set --lang, this specifies the new lang directory which will have the new topology" && exit 1;
+ [ -z $lat_dir ] && echo "Set --lat-dir, this specifies the experiment directory to store lattice" && exit 1;
+ [ -z $tree_dir ] && echo "Set --tree-dir, this specifies the directory to store new tree " && exit 1;
+
+ for f in $gmm_dir/final.mdl $ali_dir/ali.1.gz $lores_train_data_dir/feats.scp; do
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
+ done
+
+ if [ $stage -le 11 ]; then
+ echo "$0: creating lang directory with one state per phone."
+ # Create a version of the lang/ directory that has one state per phone in the
+ # topo file. [note, it really has two states.. the first one is only repeated
+ # once, the second one has zero or more repeats.]
+ if [ -d $lang ]; then
+ if [ $lang/L.fst -nt data/lang/L.fst ]; then
+ echo "$0: $lang already exists, not overwriting it; continuing"
+ else
+ echo "$0: $lang already exists and seems to be older than data/lang..."
+ echo " ... not sure what to do. Exiting."
+ exit 1;
+ fi
+ else
+ cp -r data/lang $lang
+ silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
+ nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
+ # Use our special topology... note that later on may have to tune this
+ # topology.
+ steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
+ fi
+ fi
+
+ if [ $stage -le 12 ]; then
+ # Get the alignments as lattices (gives the chain training more freedom).
+ # use the same num-jobs as the alignments
+ nj=$(cat ${ali_dir}/num_jobs) || exit 1;
+ steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" ${lores_train_data_dir} \
+ $lang $gmm_dir $lat_dir
+ rm $lat_dir/fsts.*.gz # save space
+ fi
+
+ if [ $stage -le 13 ]; then
+ # Build a tree using our new topology. We know we have alignments for the
+ # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
+ # those.
+ if [ -f $tree_dir/final.mdl ]; then
+ echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
+ exit 1;
+ fi
+ steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
+ --context-opts "--context-width=2 --central-position=1" \
+ --cmd "$train_cmd" $num_leaves ${lores_train_data_dir} $lang $ali_dir $tree_dir
+ fi
+
+ exit 0;
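For reference, run_tdnn_1d.sh (next file) invokes this script with every compulsory option set; with that script's defaults (gmm=tri6b_cleaned, train_set=train, nnet3_affix=_cleaned) the call expands roughly to:

  # Approximate expansion of the call made from local/chain/run_tdnn_1d.sh.
  local/chain/run_chain_common.sh --stage 0 \
    --gmm-dir exp/tri6b_cleaned \
    --ali-dir exp/tri6b_cleaned_ali_train_sp \
    --lores-train-data-dir data/train_sp \
    --lang data/lang_chain \
    --lat-dir exp/chain_cleaned/tri6b_cleaned_train_sp_lats \
    --num-leaves 7000 \
    --tree-dir exp/chain_cleaned/tree_sp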
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/chain/run_tdnn_1d.sh ADDED
@@ -0,0 +1,230 @@
+ #!/bin/bash
+ set -e
+
+ # 1d is as 1c but a recipe based on the newer, more compact configs, and with
+ # various configuration changes; it also includes dropout (although I'm not
+ # sure whether dropout was actually helpful, that needs to be tested).
+ # configs for 'chain'
+ stage=0
+ decode_nj=50
+ train_set=train
+ test_sets="test dev"
+ gmm=tri6b_cleaned
+ nnet3_affix=_cleaned
+
+ # The rest are configs specific to this script. Most of the parameters
+ # are just hardcoded at this level, in the commands below.
+ affix=1d
+ tree_affix=
+ train_stage=-10
+ get_egs_stage=-10
+ decode_iter=
+ nj=
+
+ # TDNN options
+ frames_per_eg=150,110,100
+ remove_egs=true
+ common_egs_dir=
+ xent_regularize=0.1
+ dropout_schedule='0,0@0.20,0.5@0.50,0'
+
+ test_online_decoding=true # if true, it will run the last decoding stage.
+
+ # End configuration section.
+ echo "$0 $@" # Print the command line for logging
+
+ . ./cmd.sh
+ . ./path.sh
+ . ./utils/parse_options.sh
+
+ if ! cuda-compiled; then
+ cat <<EOF && exit 1
+ This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
+ If you want to use GPUs (and have them), go to src/, and configure and make on a machine
+ where "nvcc" is installed.
+ EOF
+ fi
+
+ # The iVector-extraction and feature-dumping parts are the same as the standard
+ # nnet3 setup, and you can skip them by setting "--stage 11" if you have already
+ # run those things.
+
+ local/nnet3/run_ivector_common.sh --stage $stage \
+ --train_set $train_set \
+ --test_sets "$test_sets" \
+ --gmm $gmm \
+ --nj $nj \
+ --num-threads-ubm 6 \
+ --nnet3-affix "$nnet3_affix" || exit 1;
+
+ gmm_dir=exp/$gmm
+ ali_dir=exp/${gmm}_ali_${train_set}_sp
+ tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix}
+ lang=data/lang_chain
+ lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
+ dir=exp/chain${nnet3_affix}/tdnn${affix:+_$affix}_sp
+ train_data_dir=data/${train_set}_sp_hires
+ lores_train_data_dir=data/${train_set}_sp
+ train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
+
+ # if we are using the speed-perturbed data we need to generate
+ # alignments for it.
+
+
+ for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
+ $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
+ [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
+ done
+
+ # Please take this as a reference on how to specify all the options of
+ # local/chain/run_chain_common.sh
+ local/chain/run_chain_common.sh --stage $stage \
+ --gmm-dir $gmm_dir \
+ --ali-dir $ali_dir \
+ --lores-train-data-dir ${lores_train_data_dir} \
+ --lang $lang \
+ --lat-dir $lat_dir \
+ --num-leaves 7000 \
+ --tree-dir $tree_dir || exit 1;
+
+ if [ $stage -le 14 ]; then
+ echo "$0: creating neural net configs using the xconfig parser";
+
+ num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
+ learning_rate_factor=$(echo "print (0.5/$xent_regularize)" | python)
+ affine_opts="l2-regularize=0.008 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true"
+ tdnnf_opts="l2-regularize=0.008 dropout-proportion=0.0 bypass-scale=0.75"
+ linear_opts="l2-regularize=0.008 orthonormal-constraint=-1.0"
+ prefinal_opts="l2-regularize=0.008"
+ output_opts="l2-regularize=0.002"
+ layer_dim=1536
+ bottleneck_dim=160
+
+ mkdir -p $dir/configs
+
+ cat <<EOF > $dir/configs/network.xconfig
+ input dim=100 name=ivector
+ input dim=40 name=input
+
+ # please note that it is important to have input layer with the name=input
+ # as the layer immediately preceding the fixed-affine-layer to enable
+ # the use of short notation for the descriptor
+ fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
+
+ # the first splicing is moved before the lda layer, so no splicing here
+ relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=$layer_dim
+ tdnnf-layer name=tdnnf2 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=1
+ tdnnf-layer name=tdnnf3 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=1
+ tdnnf-layer name=tdnnf4 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=1
+ tdnnf-layer name=tdnnf5 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=0
+ tdnnf-layer name=tdnnf6 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf7 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf8 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf9 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf10 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf11 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf12 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf13 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf14 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf15 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf16 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ tdnnf-layer name=tdnnf17 $tdnnf_opts dim=$layer_dim bottleneck-dim=$bottleneck_dim time-stride=3
+ linear-component name=prefinal-l dim=256 $linear_opts
+
+ prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=$layer_dim small-dim=256
+ output-layer name=output include-log-softmax=false dim=$num_targets $output_opts
+
+ prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=$layer_dim small-dim=256
+ output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts
+ EOF
+ steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
+ fi
+
+ if [ $stage -le 15 ]; then
+ if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
+ utils/create_split_dir.pl \
+ /export/b{09,10,11,12}/$USER/kaldi-data/egs/lyirics-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
+ fi
+
+ steps/nnet3/chain/train.py --stage $train_stage \
+ --cmd "$decode_cmd" \
+ --feat.online-ivector-dir $train_ivector_dir \
+ --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
+ --chain.xent-regularize $xent_regularize \
+ --chain.leaky-hmm-coefficient 0.1 \
+ --chain.l2-regularize 0.0 \
+ --chain.apply-deriv-weights false \
+ --chain.lm-opts="--num-extra-lm-states=2000" \
+ --egs.dir "$common_egs_dir" \
+ --egs.stage $get_egs_stage \
+ --egs.opts "--frames-overlap-per-eg 0 --constrained false" \
+ --egs.chunk-width $frames_per_eg \
+ --trainer.dropout-schedule $dropout_schedule \
+ --trainer.add-option="--optimization.memory-compression-level=2" \
+ --trainer.num-chunk-per-minibatch 64 \
+ --trainer.frames-per-iter 2500000 \
+ --trainer.num-epochs 4 \
+ --trainer.optimization.num-jobs-initial 6 \
+ --trainer.optimization.num-jobs-final 16 \
+ --trainer.optimization.initial-effective-lrate 0.00015 \
+ --trainer.optimization.final-effective-lrate 0.000015 \
+ --trainer.max-param-change 2.0 \
+ --cleanup.remove-egs $remove_egs \
+ --feat-dir $train_data_dir \
+ --tree-dir $tree_dir \
+ --lat-dir $lat_dir \
+ --dir $dir \
+ --use-gpu=wait || exit 1;
+
+ # --trainer.optimization.num-jobs-initial 3
+ # --trainer.optimization.num-jobs-final 16
+ # --trainer.num-chunk-per-minibatch 64 \
+
+ fi
+
+
+
+ graph_dir=$dir/graph_3G
+ if [ $stage -le 16 ]; then
+ # Note: it might appear that this $lang directory is mismatched, and it is as
+ # far as the 'topo' is concerned, but this script doesn't read the 'topo' from
+ # the lang directory.
+ utils/mkgraph.sh --self-loop-scale 1.0 --remove-oov data/lang_3G $dir $graph_dir
+ # remove <UNK> from the graph, and convert back to const-FST.
+ fstrmsymbols --apply-to-output=true --remove-arcs=true "echo 3|" $graph_dir/HCLG.fst - | \
+ fstconvert --fst_type=const > $graph_dir/temp.fst
+ mv $graph_dir/temp.fst $graph_dir/HCLG.fst
+ fi
+
+ iter_opts=
+ if [ ! -z $decode_iter ]; then
+ iter_opts=" --iter $decode_iter "
+ fi
+ if [ $stage -le 17 ]; then
+ rm $dir/.error 2>/dev/null || true
+ for decode_set in $test_sets; do
+ (
+ steps/nnet3/decode.sh \
+ --acwt 1.0 \
+ --post-decode-acwt 10.0 \
+ --nj $decode_nj \
+ --cmd "$decode_cmd" $iter_opts \
+ --online-ivector-dir \
+ exp/nnet3${nnet3_affix}/ivectors_${decode_set}_hires \
+ $graph_dir \
+ data/${decode_set}_hires \
+ $dir/decode_${decode_set}${decode_iter:+_$decode_iter}_3G || exit 1
+
+ steps/lmrescore.sh --cmd "$decode_cmd" --self-loop-scale 1.0 data/lang_{3G,4G} \
+ data/${decode_set}_hires $dir/decode_${decode_set}${decode_iter:+_$decode_iter}_{3G,4G} || exit 1
+
+ ) || touch $dir/.error &
+ done
+ wait
+ if [ -f $dir/.error ]; then
+ echo "$0: something went wrong in decoding"
+ exit 1
+ fi
+ fi
+
+ exit 0;
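Because every block is guarded by a --stage check and options go through utils/parse_options.sh, an interrupted run can be resumed part-way. For example (flag names taken from the script's configuration section; values are illustrative):

  # Restart at chain training, reusing existing features, i-vectors and lattices.
  local/chain/run_tdnn_1d.sh --stage 15
  # Re-run only graph building and decoding with 30 decode jobs.
  local/chain/run_tdnn_1d.sh --stage 16 --decode-nj 30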
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/check_tools.sh ADDED
@@ -0,0 +1,50 @@
+ #!/bin/bash -u
+
+ # Copyright 2015 (c) Johns Hopkins University (Jan Trmal <jtrmal@gmail.com>)
+
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
+ # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
+ # MERCHANTABLITY OR NON-INFRINGEMENT.
+ # See the Apache 2 License for the specific language governing permissions and
+ # limitations under the License.
+
+ [ -f ./path.sh ] && . ./path.sh
+
+ command -v uconv &>/dev/null \
+ || { echo >&2 "uconv not found on PATH. You will have to install ICU4C"; exit 1; }
+
+ command -v ngram &>/dev/null \
+ || { echo >&2 "srilm not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_srilm.sh to install it"; exit 1; }
+
+ if [ -z ${LIBLBFGS} ]; then
+ echo >&2 "SRILM is not compiled with the support of MaxEnt models."
+ echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh"
+ echo >&2 "which will take care of compiling the SRILM with MaxEnt support"
+ exit 1;
+ fi
+
+ sox=`command -v sox 2>/dev/null` \
+ || { echo >&2 "sox not found on PATH. Please install it manually (you will need version 14.4.0 and higher)."; exit 1; }
+
+ # If sox is found on path, check if the version is correct
+ if [ ! -z "$sox" ]; then
+ sox_version=`$sox --version 2>&1| head -1 | sed -e 's?.*: ??' -e 's?.* ??'`
+ if [[ ! $sox_version =~ v14.4.* ]]; then
+ echo "Unsupported sox version $sox_version found on path. You will need version v14.4.0 and higher."
+ exit 1
+ fi
+ fi
+
+ command -v phonetisaurus-align &>/dev/null \
+ || { echo >&2 "Phonetisaurus not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_phonetisaurus.sh to install it"; exit 1; }
+
+ exit 0
+
+
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/nnet3/run_ivector_common.sh ADDED
@@ -0,0 +1,188 @@
+ #!/bin/bash
+
+ set -e -o pipefail
+
+ # This script is called from scripts like local/nnet3/run_tdnn.sh and
+ # local/chain/run_tdnn.sh (and may eventually be called by more scripts). It
+ # contains the common feature preparation and iVector-related parts of the
+ # script. See those scripts for examples of usage.
+
+
+ stage=1
+ nj=39
+ train_set=train_cleaned # you might set this to e.g. train.
+ test_sets="test dev"
+ gmm=tri3b_cleaned # This specifies a GMM-dir from the features of the type you're training the system on;
+ # it should contain alignments for 'train_set'.
+
+ num_threads_ubm=32
+ nnet3_affix=_train_cleaned # affix for exp/nnet3 directory to put iVector stuff in (e.g.
+ # in the tedlium recipe it's _cleaned).
+ echo "$0 $@" # Print the command line for logging
+
+ . ./cmd.sh
+ . ./path.sh
+ . utils/parse_options.sh
+
+ gmm_dir=exp/${gmm}
+ ali_dir=exp/${gmm}_ali_${train_set}_sp
+
+ for f in data/${train_set}/feats.scp ${gmm_dir}/final.mdl; do
+ if [ ! -f $f ]; then
+ echo "$0: expected file $f to exist"
+ exit 1
+ fi
+ done
+
+ if [ $stage -le 2 ] && [ -f data/${train_set}_sp_hires/feats.scp ]; then
+ echo "$0: data/${train_set}_sp_hires/feats.scp already exists."
+ echo " ... Please either remove it, or rerun this script with stage > 2."
+ exit 1
+ fi
+
+
+ if [ $stage -le 1 ]; then
+ echo "$0: preparing directory for speed-perturbed data"
+ utils/data/perturb_data_dir_speed_3way.sh data/${train_set} data/${train_set}_sp
+ fi
+
+ if [ $stage -le 2 ]; then
+ echo "$0: creating high-resolution MFCC features"
+
+ # this shows how you can split across multiple file-systems. we'll split the
+ # MFCC dir across multiple locations. You might want to be careful here, if you
+ # have multiple copies of Kaldi checked out and run the same recipe, not to let
+ # them overwrite each other.
+ mfccdir=data/${train_set}_sp_hires/data
+ if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then
+ utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage
+ fi
+
+ for datadir in ${train_set}_sp ${test_sets}; do
+ utils/copy_data_dir.sh data/$datadir data/${datadir}_hires
+ done
+
+ # do volume-perturbation on the training data prior to extracting hires
+ # features; this helps make trained nnets more invariant to test data volume.
+ utils/data/perturb_data_dir_volume.sh data/${train_set}_sp_hires
+
+ for datadir in ${train_set}_sp ${test_sets}; do
+ steps/make_mfcc.sh --nj $nj --mfcc-config conf/mfcc_hires.conf \
+ --cmd "$train_cmd" data/${datadir}_hires
+ steps/compute_cmvn_stats.sh data/${datadir}_hires
+ utils/fix_data_dir.sh data/${datadir}_hires
+ done
+ fi
+
+ if [ $stage -le 3 ]; then
+ echo "$0: computing a subset of data to train the diagonal UBM."
+
+ mkdir -p exp/nnet3${nnet3_affix}/diag_ubm
+ temp_data_root=exp/nnet3${nnet3_affix}/diag_ubm
+
+ # train a diagonal UBM using a subset of about a quarter of the data
+ num_utts_total=$(wc -l <data/${train_set}_sp_hires/utt2spk)
+ num_utts=$[$num_utts_total/4]
+ utils/data/subset_data_dir.sh data/${train_set}_sp_hires \
+ $num_utts ${temp_data_root}/${train_set}_sp_hires_subset
+
+ echo "$0: computing a PCA transform from the hires data."
+ steps/online/nnet2/get_pca_transform.sh --cmd "$train_cmd" \
+ --splice-opts "--left-context=3 --right-context=3" \
+ --max-utts 10000 --subsample 2 \
+ ${temp_data_root}/${train_set}_sp_hires_subset \
+ exp/nnet3${nnet3_affix}/pca_transform
+
+ echo "$0: training the diagonal UBM."
+ # Use 512 Gaussians in the UBM.
+ steps/online/nnet2/train_diag_ubm.sh --cmd "$train_cmd" --nj 30 \
+ --num-frames 700000 \
+ --num-threads $num_threads_ubm \
+ ${temp_data_root}/${train_set}_sp_hires_subset 512 \
+ exp/nnet3${nnet3_affix}/pca_transform exp/nnet3${nnet3_affix}/diag_ubm
+
+ fi
+
+ if [ $stage -le 4 ]; then
+ # Train the iVector extractor. Use all of the speed-perturbed data since iVector extractors
+ # can be sensitive to the amount of data. The script defaults to an iVector dimension of
+ # 100.
+ echo "$0: training the iVector extractor"
+ # in steps/online/nnet2/train_ivector_extractor.sh calculate nj_full as
+ # num_threads * num_processes
+ steps/online/nnet2/train_ivector_extractor.sh --cmd "$train_cmd" --nj 10 \
+ --num_threads 1 --num_processes 1 \
+ data/${train_set}_sp_hires exp/nnet3${nnet3_affix}/diag_ubm exp/nnet3${nnet3_affix}/extractor || exit 1;
+ fi
+
+ if [ $stage -le 5 ]; then
+ # note, we don't encode the 'max2' in the name of the ivectordir even though
+ # that's the data we extract the ivectors from, as it's still going to be
+ # valid for the non-'max2' data; the utterance list is the same.
+ ivectordir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
+ if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $ivectordir/storage ]; then
+ utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$ivectordir/storage $ivectordir/storage
+ fi
+
+ # We now extract iVectors on the speed-perturbed training data. With
+ # --utts-per-spk-max 2, the script pairs the utterances into twos, and treats
+ # each of these pairs as one speaker; this gives more diversity in iVectors.
+ # Note that these are extracted 'online' (they vary within the utterance).
+
+ # Having a larger number of speakers is helpful for generalization, and to
+ # handle per-utterance decoding well (the iVector starts at zero at the beginning
+ # of each pseudo-speaker).
+ temp_data_root=${ivectordir}
+ utils/data/modify_speaker_info.sh --utts-per-spk-max 2 \
+ data/${train_set}_sp_hires ${temp_data_root}/${train_set}_sp_hires_max2
+
+ steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj $nj \
+ ${temp_data_root}/${train_set}_sp_hires_max2 \
+ exp/nnet3${nnet3_affix}/extractor $ivectordir
+
+ # Also extract iVectors for the test data, but in this case we don't need the speed
+ # perturbation (sp).
+ for data in ${test_sets}; do
+ nspk=$(wc -l <data/${data}_hires/spk2utt)
+ steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj "${nspk}" \
+ data/${data}_hires exp/nnet3${nnet3_affix}/extractor \
+ exp/nnet3${nnet3_affix}/ivectors_${data}_hires
+ done
+ fi
+
+ if [ -f data/${train_set}_sp/feats.scp ] && [ $stage -le 8 ]; then
+ echo "$0: data/${train_set}_sp/feats.scp already exists. Refusing to overwrite the features "
+ echo " to avoid wasting time. Please remove the file and continue if you really mean this."
+ exit 1;
+ fi
+
+
+ if [ $stage -le 6 ]; then
+ echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)"
+ utils/data/perturb_data_dir_speed_3way.sh \
+ data/${train_set} data/${train_set}_sp
+ fi
+
+ if [ $stage -le 7 ]; then
+ echo "$0: making MFCC features for low-resolution speed-perturbed data (needed for alignments)"
+ steps/make_mfcc.sh --nj $nj \
+ --cmd "$train_cmd" data/${train_set}_sp
+ steps/compute_cmvn_stats.sh data/${train_set}_sp
+ echo "$0: fixing input data-dir to remove nonexistent features, in case some "
+ echo ".. speed-perturbed segments were too short."
+ utils/fix_data_dir.sh data/${train_set}_sp
+ fi
+
+ if [ $stage -le 8 ]; then
+ if [ -f $ali_dir/ali.1.gz ]; then
+ echo "$0: alignments in $ali_dir appear to already exist. Please either remove them "
+ echo " ... or use a later --stage option."
+ exit 1
+ fi
+ echo "$0: aligning with the perturbed low-resolution data"
+ steps/align_fmllr.sh --nj $nj --cmd "$train_cmd" \
+ data/${train_set}_sp data/lang $gmm_dir $ali_dir
+ fi
+
+
+ exit 0;
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/prepare_data.py ADDED
@@ -0,0 +1,130 @@
+ import json
+ import argparse
+ from os.path import join, exists, isfile
+ from os import makedirs, listdir
+ import re
+ import hashlib
+
+
+ class DataSet:
+     def __init__(self, name, workspace):
+         self.segments = []
+         self.spk2gender = []
+         self.text = []
+         self.utt2spk = []
+         self.wavscp = []
+         self.workspace = join(workspace, name)
+
+     def add_utterance(self, utt, recording):
+
+         text = utt["text"]
+         arrangement, performance, country, gender, user = recording[:-4].split("-")
+
+         # the following mapping is necessary for errors in gender in country IN
+         insensitive_none = re.compile(re.escape('none'), re.IGNORECASE)
+
+         gender = insensitive_none.sub('', utt["gender"])
+         spk = "{}{}".format(insensitive_none.sub('', gender).upper(), insensitive_none.sub('', user))
+
+         rec_id = recording[:-4]
+         utt_id = "{}-{}-{}-{}-{}-{:03}".format(spk, arrangement, performance, country, gender.upper(), utt["index"])
+
+         start = utt["start"]
+         end = utt["end"]
+
+         wavpath = join(country, "{}{}".format(country, "Vocals"), recording)
+
+         self._add_segment(utt_id, rec_id, start, end)
+         self._add_spk2gender(spk, gender)
+         self._add_text(utt_id, text)
+         self._add_utt2spk(utt_id, spk)
+         self._add_wavscp(rec_id, wavpath)
+
+     def _add_segment(self, utt_id, rec_id, start, end):
+         self.segments.append("{} {} {:.3f} {:.3f}".format(utt_id, rec_id, start, end))
+
+     def _add_spk2gender(self, spk, gender):
+         self.spk2gender.append("{} {}".format(spk, gender))
+
+     def _add_text(self, utt_id, text):
+         self.text.append("{} {}".format(utt_id, text))
+
+     def _add_utt2spk(self, utt_id, spk):
+         self.utt2spk.append("{} {}".format(utt_id, spk))
+
+     def _add_wavscp(self, rec_id, wavpath):
+         self.wavscp.append("{} sox wav/{} -G -t wav -r 16000 -c 1 - remix 1 | ".format(rec_id, wavpath))
+
+     def list2file(self, outfile, list_data):
+         list_data = list(set(list_data))
+         with open(outfile, "w") as f:
+             for line in list_data:
+                 f.write("{}\n".format(line))
+
+     def save(self):
+         if not exists(self.workspace):
+             makedirs(self.workspace)
+         self.list2file(join(self.workspace, "spk2gender"), sorted(self.spk2gender))
+         self.list2file(join(self.workspace, "text"), sorted(self.text))
+         self.list2file(join(self.workspace, "wav.scp"), sorted(self.wavscp))
+         self.list2file(join(self.workspace, "utt2spk"), sorted(self.utt2spk))
+         self.list2file(join(self.workspace, "segments"), sorted(self.segments))
+
+
+ def read_json(filepath):
+     try:  # Read the json
+         with open(filepath) as data_file:
+             data = json.load(data_file)
+     except json.decoder.JSONDecodeError:  # Json has an extra first line; error introduced when it was created
+         data = []
+
+     return data
+
+
+ def map_rec2chec(db_path, countries):
+     """
+     Read all the original audio tracks and create a dict {<checksum>: <recording>}
+     :param db_path: string, path to root of DAMP Sing!
+     :return: dict
+     """
+     rec2chec = {}
+     for country in countries:
+         recordings = [f for f in listdir(join(db_path, country, country + "Vocals")) if f.endswith(".m4a")]
+         for record in recordings:
+             rec2chec[hashlib.md5(open(join(db_path, country, country + "Vocals", record), 'rb').read()).hexdigest()] = record
+
+     return rec2chec
+
+
+ def main(args):
+     db_path = args.db_path
+     workspace = args.workspace
+     utts_path = args.utterances
+     dset = args.dset
+
+     countries = ["GB"]
+     countries += ["US", "AU"] if dset in ["train3", "train30"] else []
+     countries += ['AE', 'AR', 'BR', 'CL', 'CN', 'DE', 'ES', 'FR', 'HU',
+                   'ID', 'IN', 'IQ', 'IR', 'IT', 'JP', 'KR', 'MX', 'MY',
+                   'NO', 'PH', 'PT', 'RU', 'SA', 'SG', 'TH', 'VN', 'ZA'] if dset in ["train30"] else []
+
+     performances = map_rec2chec(db_path, countries)
+     utterances = read_json(utts_path)
+     dataset = DataSet(dset, workspace)
+
+     for utt in utterances:
+         dataset.add_utterance(utt, performances[utt["wavfile"]])
+
+     dataset.save()
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument("workspace", type=str, help="Path where the output files will be saved")
+     parser.add_argument("db_path", type=str, help="Path to DAMP 300x30x2 database")
+     parser.add_argument("utterances", type=str, help="Path to utterance details in json format",
+                         default="metadata.json")
+     parser.add_argument("dset", type=str, help="Name of the dataset")
+
+     args = parser.parse_args()
+     main(args)
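The script takes four positional arguments (workspace, db_path, utterances, dset) and writes the Kaldi data files (text, wav.scp, segments, utt2spk, spk2gender) under workspace/dset. A hypothetical invocation, with illustrative metadata/file names:

  # Build the 30-country training set description under data/train30.
  python local/prepare_data.py data /path/to/sing_300x30x2 train30_metadata.json train30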
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/prepare_dict.sh ADDED
@@ -0,0 +1,154 @@
+ #!/bin/bash
+
+ # adapted from the ami and chime5 dict preparation scripts
+ # Author: Gerardo Roa
+
+ # Begin configuration section.
+ words=5000
+ # End configuration section
+
+ echo "$0 $@" # Print the command line for logging
+
+ if [ -f path.sh ]; then . ./path.sh; fi
+ . utils/parse_options.sh || exit 1;
+
+ # The parts of the output of this that will be needed are
+ # [in data/local/dict/ ]
+ # lexicon.txt
+ # extra_questions.txt
+ # nonsilence_phones.txt
+ # optional_silence.txt
+ # silence_phones.txt
+
+ mkdir -p data
+
+
+
+ dir=data/local/dict
+ mkdir -p $dir
+
+ echo "$0: Getting CMU dictionary"
+ if [ ! -f $dir/cmudict.done ]; then
+ [ -d $dir/cmudict ] && rm -rf $dir/cmudict
+ svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict $dir/cmudict
+ touch $dir/cmudict.done
+ fi
+
+ echo "$0: Preparing files in $dir"
+ # Silence phones
+ for w in SIL SPN; do echo $w; done > $dir/silence_phones.txt
+ echo SIL > $dir/optional_silence.txt
+
+
+ # For this setup we're discarding stress.
+ cat $dir/cmudict/cmudict-0.7b.symbols | \
+ perl -ne 's:[0-9]::g; s:\r::; print lc($_)' | \
+ tr a-z A-Z | \
+ sort -u > $dir/nonsilence_phones.txt
+
+ # An extra question will be added by including the silence phones in one class.
+ paste -d ' ' -s $dir/silence_phones.txt > $dir/extra_questions.txt
+
+
+ grep -v ';;;' $dir/cmudict/cmudict-0.7b |\
+ uconv -f latin1 -t utf-8 -x Any-Lower |\
+ perl -ne 's:(\S+)\(\d+\) :$1 :; s: : :; print;' |\
+ perl -ne '@F = split " ",$_,2; $F[1] =~ s/[0-9]//g; print "$F[0] $F[1]";' \
+ > $dir/lexicon1_raw_nosil.txt || exit 1;
+
+
+ # Add prons for laughter, noise, oov
+ for w in `grep -v sil $dir/silence_phones.txt`; do
+ echo "[$w] $w"
+ done | cat - $dir/lexicon1_raw_nosil.txt > $dir/lexicon2_raw.txt || exit 1;
+
+
+ # we keep all words from the cmudict in the lexicon
+ # might reduce OOV rate on dev and test
+ cat $dir/lexicon2_raw.txt \
+ <( echo "mm m"
+ echo "<unk> spn" \
+ ) | sed 's/[\t ]/\t/' | tr a-z A-Z | sort -u > $dir/iv_lexicon.txt
+
+
+ cat data/local/corpus.txt | \
+ awk '{for (n=1;n<=NF;n++){ count[$n]++; } } END { for(n in count) { print count[n], n; }}' | \
+ sort -nr > $dir/word_counts_b
+
+
+ # Select the top N words, growing the list so that all words with the same count are kept together
+
+ vocab_size=0
+ start_line=3 # first two are <s> and </s>
+ touch $dir/word_list
+
+ while [ "$vocab_size" -le "$words" ]; do
+ current_count=`sed "${start_line}q;d" $dir/word_counts_b | awk '{print $1}'`
+ cat $dir/word_counts_b | grep "^$current_count " | awk '{print $2}' >> $dir/word_list
+ vocab_size=`cat $dir/word_list | wc -l`
+ start_line=$((vocab_size + 1 ))
+ done
+
+
+ head -n $vocab_size $dir/word_counts_b > $dir/word_counts
+ sort -u $dir/word_list > $dir/word_list_sorted
+
+
+ awk '{print $1}' $dir/iv_lexicon.txt | \
+ perl -e '($word_counts)=@ARGV;
+ open(W, "<$word_counts")||die "opening word-counts $word_counts";
+ while(<STDIN>) { chop; $seen{$_}=1; }
+ while(<W>) {
+ ($c,$w) = split;
+ if (!defined $seen{$w}) { print; }
+ } ' $dir/word_counts > $dir/oov_counts.txt
+
+
+ echo "*Highest-count OOVs (including fragments) are:"
+ head -n 10 $dir/oov_counts.txt
+ echo "*Highest-count OOVs (excluding fragments) are:"
+ grep -v -E '^-|-$' $dir/oov_counts.txt | head -n 10 || true
+
+
+ echo "*Training a G2P and generating missing pronunciations"
+ mkdir -p $dir/g2p/
+
+ if [ -e $dir/g2p/g2p.fst ]
+ then
+ echo "$0: Phonetisaurus model exists. $dir/g2p/g2p.fst will be used"
+ else
+ phonetisaurus-align --input=$dir/iv_lexicon.txt --ofile=$dir/g2p/aligned_lexicon.corpus
+ ngram-count -order 4 -kn-modify-counts-at-end -ukndiscount\
+ -gt1min 0 -gt2min 0 -gt3min 0 -gt4min 0 \
+ -text $dir/g2p/aligned_lexicon.corpus -lm $dir/g2p/aligned_lexicon.arpa
+ phonetisaurus-arpa2wfst --lm=$dir/g2p/aligned_lexicon.arpa --ofile=$dir/g2p/g2p.fst
+ fi
+
+ awk '{print $2}' $dir/oov_counts.txt > $dir/oov_words.txt
+ phonetisaurus-apply --nbest 2 --model $dir/g2p/g2p.fst --thresh 5 --accumulate \
+ --word_list $dir/oov_words.txt > $dir/oov_lexicon.txt
+
+
+ ## We join the pronunciations with the selected words to create lexicon.txt
+ cat $dir/oov_lexicon.txt $dir/iv_lexicon.txt | sort -u > $dir/lexicon1_plus_g2p.txt
+ join $dir/lexicon1_plus_g2p.txt $dir/word_list_sorted > $dir/lexicon.txt
+
+ echo "<UNK> SPN" >> $dir/lexicon.txt
+
+ ## The next section is again just for debug purposes
+ ## to show words for which the G2P failed
+ rm -f $dir/lexiconp.txt 2>/dev/null; # can confuse later script if this exists.
+ awk '{print $1}' $dir/lexicon.txt | \
+ perl -e '($word_counts)=@ARGV;
+ open(W, "<$word_counts")||die "opening word-counts $word_counts";
+ while(<STDIN>) { chop; $seen{$_}=1; }
+ while(<W>) {
+ ($c,$w) = split;
+ if (!defined $seen{$w}) { print; }
+ } ' $dir/word_counts > $dir/oov_counts.g2p.txt
+
+ echo "*Highest-count OOVs (including fragments) after G2P are:"
+ head -n 10 $dir/oov_counts.g2p.txt
+
+ utils/validate_dict_dir.pl $dir
+ exit 0;
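The recipe's top-level run.sh is not included in this 50-file view, but in a standard Kaldi flow the dictionary directory produced here would next be compiled into a lang directory, roughly:

  # Typical follow-up step (not shown in this commit): build data/lang from the dict dir.
  utils/prepare_lang.sh data/local/dict "<UNK>" data/local/lang data/lang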
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/score.sh ADDED
@@ -0,0 +1,3 @@
+ #!/bin/bash
+
+ steps/scoring/score_kaldi_wer.sh "$@"
DSing/Kaldi-Dsing-task/DSing Kaldi Recipe/dsing/s5/local/train_lms_srilm.sh ADDED
@@ -0,0 +1,167 @@
+ #!/bin/bash
+ # Copyright (c) 2017 Johns Hopkins University (Author: Yenda Trmal, Shinji Watanabe)
+ # Apache 2.0
+
+ export LC_ALL=C
+
+ # Begin configuration section.
+ words_file=
+ train_text=
+ dev_text=
+ oov_symbol="<UNK>"
+ # End configuration section
+
+ echo "$0 $@"
+
+ [ -f path.sh ] && . ./path.sh
+ . ./utils/parse_options.sh || exit 1
+
+ echo "-------------------------------------"
+ echo "Building an SRILM language model     "
+ echo "-------------------------------------"
+
+ if [ $# -ne 2 ] ; then
+ echo "Incorrect number of parameters. "
+ echo "Script has to be called like this:"
+ echo " $0 [switches] <datadir> <tgtdir>"
+ echo "For example: "
+ echo " $0 data data/srilm"
+ echo "The allowed switches are: "
+ echo " words_file=<word_file|> word list file -- data/lang/words.txt by default"
+ echo " train_text=<train_text|> data/train/text is used in case when not specified"
+ echo " dev_text=<dev_text|> last 10 % of the train text is used by default"
+ echo " oov_symbol=<unk_symbol|<UNK>> symbol to use for oov modeling -- <UNK> by default"
+ exit 1
+ fi
+
+ datadir=$1
+ tgtdir=$2
+
+ ##End of configuration
+ loc=`which ngram-count`;
+ if [ -z $loc ]; then
+ echo >&2 "You appear to not have SRILM tools installed on your path."
+ echo >&2 "Use the script \$KALDI_ROOT/tools/install_srilm.sh to install it."
+ exit 1
+ fi
+
+ # Prepare the destination directory
+ mkdir -p $tgtdir
+
+ for f in $words_file $train_text $dev_text; do
+ [ ! -s $f ] && echo "No such file $f" && exit 1;
+ done
+
+ [ -z $words_file ] && words_file=$datadir/lang/words.txt
+ if [ ! -z "$train_text" ] && [ -z "$dev_text" ] ; then
+ nr=`cat $train_text | wc -l`
+ nr_dev=$(($nr / 10 ))
+ nr_train=$(( $nr - $nr_dev ))
+ orig_train_text=$train_text
+ head -n $nr_train $train_text > $tgtdir/train_text
+ tail -n $nr_dev $train_text > $tgtdir/dev_text
+
+ train_text=$tgtdir/train_text
+ dev_text=$tgtdir/dev_text
+ echo "Using words file: $words_file"
+ echo "Using train text: 9/10 of $orig_train_text"
+ echo "Using dev text : 1/10 of $orig_train_text"
+ elif [ ! -z "$train_text" ] && [ ! -z "$dev_text" ] ; then
+ echo "Using words file: $words_file"
+ echo "Using train text: $train_text"
+ echo "Using dev text : $dev_text"
+ train_text=$train_text
+ dev_text=$dev_text
+ else
+ train_text=$datadir/train/text
+ dev_text=$datadir/dev2h/text
+ echo "Using words file: $words_file"
+ echo "Using train text: $train_text"
+ echo "Using dev text : $dev_text"
+
+ fi
+
+ [ ! -f $words_file ] && echo >&2 "File $words_file must exist!" && exit 1
+ [ ! -f $train_text ] && echo >&2 "File $train_text must exist!" && exit 1
+ [ ! -f $dev_text ] && echo >&2 "File $dev_text must exist!" && exit 1
+
+
+ # Extract the word list from the training dictionary; exclude special symbols
+ sort $words_file | awk '{print $1}' | grep -v '\#0' | grep -v '<eps>' | grep -v -F "$oov_symbol" > $tgtdir/vocab
+ if (($?)); then
+ echo "Failed to create vocab from $words_file"
+ exit 1
+ else
+ # wc vocab # doesn't work due to some encoding issues
+ echo vocab contains `cat $tgtdir/vocab | perl -ne 'BEGIN{$l=$w=0;}{split; $w+=$#_; $w++; $l++;}END{print "$l lines, $w words\n";}'`
+ fi
+
+ # corpus file has <s> </s> tags; remove them
+ sed -e 's/^\w*\ *//' -e 's/ \+[^ ]\+$//' $train_text | sort -u | \
+ perl -ane 'print join(" ", @F[1..$#F]) . "\n" if @F > 1' > $tgtdir/train.txt
+ if (($?)); then
+ echo "Failed to create $tgtdir/train.txt from $train_text"
+ exit 1
+ else
+ echo "Removed first and last word (<s> </s> tags) from every line of $train_text"
+ # wc text.train train.txt # doesn't work due to some encoding issues
+ echo $train_text contains `cat $train_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'`
+ echo train.txt contains `cat $tgtdir/train.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'`
+ fi
+
+ # data/dev/text
+ cat $dev_text | cut -d ' ' -f 2- > $tgtdir/dev.txt
+ if (($?)); then
+ echo "Failed to create $tgtdir/dev.txt from $dev_text"
+ exit 1
+ else
+ echo "Removed first word (uid) from every line of $dev_text"
+ # wc text.train train.txt # doesn't work due to some encoding issues
+ echo $dev_text contains `cat $dev_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'`
+ echo $tgtdir/dev.txt contains `cat $tgtdir/dev.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'`
+ fi
+
+
+ if [ ! -z ${LIBLBFGS} ]; then
+ #please note that if the switch -map-unk "$oov_symbol" is used with -maxent-convert-to-arpa, ngram-count will segfault
+ #instead of that, we simply output the model in the maxent format and convert it using the "ngram"
+ echo "-------------------"
+ echo "Maxent 3grams"
+ echo "-------------------"
+ sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \
+ ngram-count -lm - -order 3 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\
+ ngram -lm - -order 3 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\
+ sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/3gram.me.gz || exit 1
+
+ echo "-------------------"
+ echo "Maxent 4grams"
+ echo "-------------------"
+ sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \
+ ngram-count -lm - -order 4 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\
+ ngram -lm - -order 4 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\
+ sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/4gram.me.gz || exit 1
+ else
+ echo >&2 "SRILM is not compiled with the support of MaxEnt models."
+ echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh"
+ echo >&2 "which will take care of compiling the SRILM with MaxEnt support"
+ exit 1;
+ fi
+
+
+ echo "--------------------"
+ echo "Computing perplexity"
+ echo "--------------------"
+ (
+ for f in $tgtdir/3gram* ; do ( echo $f; ngram -order 3 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done
+ for f in $tgtdir/4gram* ; do ( echo $f; ngram -order 4 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done
+ ) | sort -r -n -k 15,15g | column -t | tee $tgtdir/perplexities.txt
+
+ echo "The perplexity scores report is stored in $tgtdir/perplexities.txt "
+ echo ""
+
+ for best_ngram in {3,4}gram ; do
+ outlm=best_${best_ngram}.gz
+ lmfilename=$(grep "${best_ngram}" $tgtdir/perplexities.txt | head -n 1 | cut -f 1 -d ' ')
+ echo "$outlm -> $lmfilename"
+ (cd $tgtdir; rm -f $outlm; ln -sf $(basename $lmfilename) $outlm )
+ done
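Mirroring the usage message above, a plausible invocation for this recipe (paths are illustrative) would be:

  # Train 3-gram and 4-gram MaxEnt LMs on the training transcripts, pick the best by dev perplexity.
  local/train_lms_srilm.sh --train-text data/train/text --words-file data/lang/words.txt \
    data data/srilm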
DSing/sing_300x30x2/AU/AUVocals/101935856_2505827-1179367744_1581430405-AU-M-411494616.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a6ab9c06471bfa6c93f888d273493802098fdc68b3bb0d1889643e766d4d74d
+ size 19851988
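This and the remaining .wav entries are Git LFS pointer files rather than audio; the waveforms themselves live in LFS storage and can be fetched selectively after cloning, for example:

  # Fetch only the Australian vocal tracks referenced by these pointers.
  git lfs pull --include "DSing/sing_300x30x2/AU/AUVocals/*.wav"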
DSing/sing_300x30x2/AU/AUVocals/1127321777_3089957-1008354660_1549665636-AU-F-1327123746.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:783a7784e5908456559ff548d5908d770b10786b0f17bed7d9343b19872e8071
+ size 19646892
DSing/sing_300x30x2/AU/AUVocals/119790220_1667353-120933022_1544151714-AU-F-120928760.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2f935045040000def435bef52a4d00839b9e427262e1cc24c9184e9476fa000
+ size 22572624
DSing/sing_300x30x2/AU/AUVocals/119790220_263436-441829487_1587105890-AU-F-441829569.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c4e4ed16be2ece15454fbc6e8f5ac1226441d97057158474572b84e0d3bf19e
+ size 26122492
DSing/sing_300x30x2/AU/AUVocals/17119916_667134-406514963_1571668143-AU-M-757521787.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:534b98c8aa0d29b3466e9a2eb77bdd6bd332c70c5fafc1d42e36b65c188ecd0f
+ size 21085804
DSing/sing_300x30x2/AU/AUVocals/3217450_3217450-1193001215_1411682977-AU-F-126106818.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:451e40d9ddf17ff8ddb80c75d7bdc5de99d2d9abfa4424d41c9abaabaf6537ab
+ size 13312460
DSing/sing_300x30x2/AU/AUVocals/3219205_3219205-32449093_1630217042-AU-F-1336713592.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee921ace631e99fe817cb8f10bab1e59704fa18651016da592d670e749213b65
+ size 25814872
DSing/sing_300x30x2/AU/AUVocals/331426834_128031-55256932_1613332221-AU-M-55260049.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77a5d69b7cddb5ccd70035709610eb7704cdc28fa5c6fdeeea24c5c74601249d
+ size 21964076
DSing/sing_300x30x2/AU/AUVocals/3448294_3448294-1042608402_1487495573-AU-M-524731806.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50bd730bce8dc6afafcf53eeb8eaac8af59530081172a309ed55beb5c6e0e314
+ size 17350920
DSing/sing_300x30x2/AU/AUVocals/3582632_3582632-425532491_1602008190-AU-F-947968160.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a454b96acb8f2e98fc7300c220b72a5b190c9763f183a0cd47fd30ab9d7a1bdd
+ size 14556724
DSing/sing_300x30x2/AU/AUVocals/366321445_101397-1340932642_1567244952-AU-M-429038226.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f1b6f02e3434150af66e153e66d42cfc4515fdfeb13e178f9754b2718853c674
+ size 27624916
DSing/sing_300x30x2/AU/AUVocals/3703714_232358-541252383_1457684489-AU-F-521014707.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f98c01a0f3bfdd2579ea8efb9f1026d117c7f8b7c24571d173fa1b2fcde3305
+ size 5232812
DSing/sing_300x30x2/AU/AUVocals/3769302_3769302-1088349686_1621899122-AU-F-376535394.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57df82205a371c417491a92a1a306d466eccd9df9eb06ecb6caa0cd99af9a469
+ size 23056764
DSing/sing_300x30x2/AU/AUVocals/3769415_3769415-550188297_1562374359-AU-M-180102765.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d943949bfed3f0f5c3881ea02b2dec8ef204715842390a0f6b88f4dd32a3e4
+ size 26210540
DSing/sing_300x30x2/AU/AUVocals/3769646_3769646-489322685_198913142-AU-F-210384260.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:797af46859f47d2f03cdcea7915e0d715daab3d34fa1cab219866c4f93392b31
+ size 20534096
DSing/sing_300x30x2/AU/AUVocals/3769825_3769825-1221818620_1608937070-AU-F-1304504139.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40f17526d9a53c59c30ee94f9842c95e7eda9a3c9e35b49d805f7271b7547fb2
+ size 18825480
DSing/sing_300x30x2/AU/AUVocals/3770436_3770436-32449093_1621119562-AU-F-1336713592.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:560a5fa42ee05e0b2befdc8badb1f2275cf7561b06465a07a96a43be7d866bc4
+ size 22075532
DSing/sing_300x30x2/AU/AUVocals/3770715_3770715-1004365916_1631793249-AU-F-140937561.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4a6e2926247e7ec45b8f8f53f9922424d816df30455cf567a7665a080bf1153
+ size 19741648
DSing/sing_300x30x2/AU/AUVocals/3771478_3771478-538293740_1663834938-AU-M-180102765.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03cf9a330e200a615f97a72ffe815a2c45e80ae87bb1c39e88f00cbe9d5a805b
+ size 26723236
DSing/sing_300x30x2/AU/AUVocals/40228097_169333-863902410_1558169302-AU-F-238098736.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a99a479c8746620dcf89a15644f2278885947b73c6f382bce8ca80aa24625b12
+ size 23535600
DSing/sing_300x30x2/AU/AUVocals/409146418_113102-1116943097_1689981903-AU-F-1116943324.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16bd6138d725a69bce37752fdf247ff693a42033dcaa69acbcd251953be3c47e
+ size 13908396
DSing/sing_300x30x2/AU/AUVocals/4385219_4385219-74242534_1661534803-AU-F-578766880.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb9217603e9f9a7387bca06f6fed611734a9ca0997a5a884d905236528c72e65
+ size 21720544
DSing/sing_300x30x2/AU/AUVocals/474572_111629-707863177_1441046998-AU-F-707873017.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bc3b14d9a68f425891fdeb426892fd82180f5080054587129b09fef613858be
+ size 24265900
DSing/sing_300x30x2/AU/AUVocals/530817499_1833105-882003598_1682046239-AU-M-882000596.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b9b68444ec7619fd69e3ed240714aaf2a39338ec51417384a1339b999bea679
+ size 11851180
DSing/sing_300x30x2/AU/AUVocals/53709742_99250-367325249_1522374367-AU-M-449511466.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ca124e717558beb25fccba7bbad86c1a2dbc4251f98965b0cb61e409e505aa1
+ size 18379656
DSing/sing_300x30x2/AU/AUVocals/587178900_1824409-1046397871_1525665397-AU-M-1009387124.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a11601faef9ed6c25ecbf7ef860f55626427cab37d2c08c84ffab3e98d47e02
+ size 16385712
DSing/sing_300x30x2/AU/AUVocals/64422077_111775-133787857_1608718178-AU-M-425576199.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d7abdfa57cea2c1723829cf5ecc0811eb8df091432d53af6c19f192ebd675f7
+ size 26948376
DSing/sing_300x30x2/AU/AUVocals/66512068_448309-424554687_1634821714-AU-M-424549845.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d090bd4725ad4d6245ac796ce40b25d3888600430b6f7ea8252348bda8a193c4
+ size 22087096
DSing/sing_300x30x2/AU/AUVocals/670462920_1440483-1046340502_1538129960-AU-M-1046341495.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e0d5ddb15301e809db89c15e2b60179ac8c6ac2c45568f8beaa7fc701971451
+ size 23586736
DSing/sing_300x30x2/AU/AUVocals/671453965_1179860-124368911_1654337020-AU-M-1160483819.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af04df31fe8fc4e4a0da5ba1aeb8f0d62c33b056a839bc94feb29b3cdad045cd
+ size 32948652
DSing/sing_300x30x2/AU/AUVocals/687441455_1598516-1283348030_1653892220-AU-M-1257175175.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08664bdcc63cc306a3da53530c58971be9792ff42f8e7be00774915f168faced
+ size 13794988
DSing/sing_300x30x2/AU/AUVocals/689806719_841385-77916125_1576405705-AU-M-77917061.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8b48cd3fa0a715cdddbf8de9c810e96ff0447d59ff3312bf5478ba3ad763760
+ size 19573348
DSing/sing_300x30x2/AU/AUVocals/709155717_2053380-1315163597_1602466189-AU-F-749289766.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:600f258e5d94a949eb50689b82480d63f20cbb93dce78d4aed072e1a7599a88f
+ size 23780248
DSing/sing_300x30x2/AU/AUVocals/757082027_986792-874217558_1597789131-AU-M-800005312.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6c55ec499c0b09b3e6339c63ea55e866a434e18bf9f6f8e2665778109b44d0c
+ size 23103296
DSing/sing_300x30x2/AU/AUVocals/789788546_2193873-706068094_1543553510-AU-F-706058331.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df6800224bf37a6fbb31ca5d6ee59efc39182efa60bd48300c7ae73d23ee88aa
+ size 4854700
DSing/sing_300x30x2/AU/AUVocals/90470934_2940068-417337448_1568372565-AU-F-417337457.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6e04a6c024e166b26b820457365edaf52dbd6e907e21a40332e2dc639f1fe1c
+ size 21122584
DSing/sing_300x30x2/AU/AUVocals/95450022_546114-534290805_1577875290-AU-M-892339901.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2cb29c946370150c09cf933fcfe8cdfe6a027ddec5b0ea021f89de5c3ea425d
+ size 20806048
DSing/sing_300x30x2/IT/ITVocals/139427932_58803-626568036_1662347130-IT-M-626573007.wav ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ace842999a927d560a3f46191ce1cb2b182e9a86c2166b8f3a3014ac9e936e2
+ size 19765052