ftshijt committed
Commit 33b6bba
Parent: 39a662e

Update model

README.md CHANGED
@@ -1,3 +1,332 @@
  ---
- license: apache-2.0
+ tags:
+ - espnet
+ - audio
+ - singing-voice-synthesis
+ language: jp
+ datasets:
+ - kiritan
+ license: cc-by-4.0
  ---
+
+ ## ESPnet2 SVS model
+
+ ### `espnet/kiritan_svs_rnn`
+
+ This model was trained by ftshijt using the kiritan recipe in [espnet](https://github.com/espnet/espnet/).
+
+ ### Demo: How to use in ESPnet2
+
+ Follow the [ESPnet installation instructions](https://espnet.github.io/espnet/installation.html)
+ if you haven't done that already.
+
+ ```bash
+ cd espnet
+ git checkout 5c4d7cf7feba8461de2e1080bf82182f0efaef38
+ pip install -e .
+ cd egs2/kiritan/svs1
+ ./run.sh --skip_data_prep false --skip_train true --download_model espnet/kiritan_svs_rnn
+ ```
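If you only need the published weights and configuration (for example, to inspect them outside the recipe), the files in this repository can also be fetched directly. A minimal sketch, assuming the `huggingface_hub` package is installed; everything beyond the repository id above is illustrative:

```python
# Sketch: download this repository's files directly (outside the recipe).
# Assumes `huggingface_hub` is installed; not part of the official recipe.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="espnet/kiritan_svs_rnn")
print(local_dir)  # contains meta.yaml plus the exp/... checkpoint and config
```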
+
+
+ ## SVS config
+
+ <details><summary>expand</summary>
+
+ ```
+ config: conf/tuning/train_naive_rnn_dp.yaml
+ print_config: false
+ log_level: INFO
+ drop_last_iter: false
+ dry_run: false
+ iterator_type: sequence
+ valid_iterator_type: null
+ output_dir: exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp
+ ngpu: 1
+ seed: 0
+ num_workers: 8
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: null
+ dist_rank: null
+ local_rank: 0
+ dist_master_addr: null
+ dist_master_port: null
+ dist_launcher: null
+ multiprocessing_distributed: false
+ unused_parameters: false
+ sharded_ddp: false
+ cudnn_enabled: true
+ cudnn_benchmark: false
+ cudnn_deterministic: true
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 500
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - valid
+   - loss
+   - min
+ - - train
+   - loss
+   - min
+ keep_nbest_models: 2
+ nbest_averaging_interval: 0
+ grad_clip: 1.0
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 1
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: null
+ use_matplotlib: true
+ use_tensorboard: true
+ create_graph_in_tensorboard: false
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ wandb_entity: null
+ wandb_name: null
+ wandb_model_log_interval: -1
+ detect_anomaly: false
+ use_lora: false
+ save_lora_only: true
+ lora_conf: {}
+ pretrain_path: null
+ init_param: []
+ ignore_init_mismatch: false
+ freeze_param: []
+ num_iters_per_epoch: null
+ batch_size: 16
+ valid_batch_size: null
+ batch_bins: 1000000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/svs_stats_raw_phn_pyopenjtalk_jp/train/text_shape.phn
+ - exp/svs_stats_raw_phn_pyopenjtalk_jp/train/singing_shape
+ valid_shape_file:
+ - exp/svs_stats_raw_phn_pyopenjtalk_jp/valid/text_shape.phn
+ - exp/svs_stats_raw_phn_pyopenjtalk_jp/valid/singing_shape
+ batch_type: sorted
+ valid_batch_type: null
+ fold_length:
+ - 150
+ - 240000
+ sort_in_batch: descending
+ shuffle_within_batch: false
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ chunk_excluded_key_prefixes: []
+ chunk_default_fs: null
+ train_data_path_and_name_and_type:
+ - - dump/raw/tr_no_dev/text
+   - text
+   - text
+ - - dump/raw/tr_no_dev/wav.scp
+   - singing
+   - sound
+ - - dump/raw/tr_no_dev/label
+   - label
+   - duration
+ - - dump/raw/tr_no_dev/score.scp
+   - score
+   - score
+ valid_data_path_and_name_and_type:
+ - - dump/raw/dev/text
+   - text
+   - text
+ - - dump/raw/dev/wav.scp
+   - singing
+   - sound
+ - - dump/raw/dev/label
+   - label
+   - duration
+ - - dump/raw/dev/score.scp
+   - score
+   - score
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ allow_multi_rates: false
+ valid_max_cache_size: null
+ exclude_weight_decay: false
+ exclude_weight_decay_conf: {}
+ optim: adam
+ optim_conf:
+   lr: 0.001
+   eps: 1.0e-06
+   weight_decay: 0.0
+ scheduler: null
+ scheduler_conf: {}
+ token_list:
+ - <blank>
+ - <unk>
+ - pau
+ - a
+ - i
+ - o
+ - e
+ - u
+ - k
+ - n
+ - r
+ - t
+ - m
+ - d
+ - s
+ - N
+ - sh
+ - g
+ - y
+ - b
+ - w
+ - cl
+ - ts
+ - z
+ - ch
+ - j
+ - h
+ - f
+ - p
+ - ky
+ - ry
+ - hy
+ - py
+ - ny
+ - <sos/eos>
+ odim: null
+ model_conf: {}
+ use_preprocessor: true
+ token_type: phn
+ bpemodel: null
+ non_linguistic_symbols: null
+ cleaner: null
+ g2p: pyopenjtalk
+ fs: 24000
+ score_feats_extract: syllable_score_feats
+ score_feats_extract_conf:
+   fs: 24000
+   n_fft: 2048
+   win_length: 1200
+   hop_length: 300
+ feats_extract: fbank
+ feats_extract_conf:
+   n_fft: 2048
+   hop_length: 300
+   win_length: 1200
+   fs: 24000
+   fmin: 80
+   fmax: 7600
+   n_mels: 80
+ normalize: global_mvn
+ normalize_conf:
+   stats_file: exp/svs_stats_raw_phn_pyopenjtalk_jp/train/feats_stats.npz
+ svs: naive_rnn_dp
+ svs_conf:
+   midi_dim: 129
+   embed_dim: 512
+   duration_dim: 500
+   eprenet_conv_layers: 0
+   eprenet_conv_chans: 256
+   eprenet_conv_filts: 3
+   elayers: 3
+   eunits: 256
+   ebidirectional: true
+   midi_embed_integration_type: add
+   dlayers: 2
+   dunits: 256
+   dbidirectional: true
+   postnet_layers: 5
+   postnet_chans: 512
+   postnet_filts: 5
+   use_batch_norm: true
+   reduction_factor: 1
+   eprenet_dropout_rate: 0.2
+   edropout_rate: 0.1
+   ddropout_rate: 0.1
+   postnet_dropout_rate: 0.5
+   init_type: pytorch
+   use_masking: true
+ pitch_extract: dio
+ pitch_extract_conf:
+   use_token_averaged_f0: false
+   fs: 24000
+   n_fft: 2048
+   hop_length: 300
+   f0max: 800
+   f0min: 80
+   reduction_factor: 1
+ pitch_normalize: global_mvn
+ pitch_normalize_conf:
+   stats_file: exp/svs_stats_raw_phn_pyopenjtalk_jp/train/pitch_stats.npz
+ ying_extract: null
+ ying_extract_conf: {}
+ energy_extract: null
+ energy_extract_conf: {}
+ energy_normalize: null
+ energy_normalize_conf: {}
+ required:
+ - output_dir
+ - token_list
+ version: '202310'
+ distributed: false
+ ```
+
+ </details>
+
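As a quick sanity check on the feature settings above, the training config can be parsed directly. A minimal sketch, assuming PyYAML is installed and that the config has been downloaded into the working directory (the path mirrors this repository's layout):

```python
# Sketch: read the training config shipped with this model and derive the
# frame geometry implied by the fbank settings (fs=24000, hop_length=300).
import yaml

with open("exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/config.yaml") as f:
    cfg = yaml.safe_load(f)

feats = cfg["feats_extract_conf"]
fs, hop, win = feats["fs"], feats["hop_length"], feats["win_length"]
print(cfg["svs"])                           # naive_rnn_dp
print(feats["n_mels"], "mel bins")          # 80
print(1000.0 * hop / fs, "ms frame shift")  # 300 / 24000 -> 12.5 ms
print(1000.0 * win / fs, "ms window")       # 1200 / 24000 -> 50.0 ms
```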
+
+ ### Citing ESPnet
+
+ ```bibtex
+ @inproceedings{watanabe2018espnet,
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+ title={{ESPnet}: End-to-End Speech Processing Toolkit},
+ year={2018},
+ booktitle={Proceedings of Interspeech},
+ pages={2207--2211},
+ doi={10.21437/Interspeech.2018-1456},
+ url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
+ }
+
+ @inproceedings{shi22d_interspeech,
+ author={Jiatong Shi and Shuai Guo and Tao Qian and Tomoki Hayashi and Yuning Wu and Fangzheng Xu and Xuankai Chang and Huazhe Li and Peter Wu and Shinji Watanabe and Qin Jin},
+ title={{Muskits: an End-to-end Music Processing Toolkit for Singing Voice Synthesis}},
+ year={2022},
+ booktitle={Proc. Interspeech 2022},
+ pages={4277--4281},
+ doi={10.21437/Interspeech.2022-10039}
+ }
+ ```
+
+ or arXiv:
+
+ ```bibtex
+ @misc{watanabe2018espnet,
+ title={ESPnet: End-to-End Speech Processing Toolkit},
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Yalta and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+ year={2018},
+ eprint={1804.00015},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ ```
exp/svs_stats_raw_phn_pyopenjtalk_jp/train/feats_stats.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3f8760a03a38e9f6aedceafc940853e135de03be88dc0f400f80111012ae2f4
+ size 1402
exp/svs_stats_raw_phn_pyopenjtalk_jp/train/pitch_stats.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:472f44554816456248d361dd80b0a2a3d17c6dc420486a72fd7a0eedb2144f99
+ size 770
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/263epoch.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca72943a6a697d70db0d30cc6dcea6418c3d56857ca6c602eb5a177fe5b96a9a
+ size 86890167
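The entries above are Git LFS pointer files: the actual binaries are stored in LFS and identified by their SHA-256 oid. A minimal sketch of checking a downloaded checkpoint against the oid recorded above (the local path is illustrative):

```python
# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer.
# The path mirrors this repository's layout; adjust for your local copy.
import hashlib

EXPECTED = "ca72943a6a697d70db0d30cc6dcea6418c3d56857ca6c602eb5a177fe5b96a9a"

h = hashlib.sha256()
with open("exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/263epoch.pth", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch"
print("263epoch.pth matches its LFS pointer")
```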
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/config.yaml ADDED
@@ -0,0 +1,249 @@
(249 added lines, identical to the SVS config shown in README.md above)
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/backward_time.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/clip.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/duration_loss.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/forward_time.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/gpu_max_cached_mem_GB.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/grad_norm.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/iter_time.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/l1_loss.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/loss.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/loss_scale.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/optim0_lr0.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/optim_step_time.png ADDED
exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/images/train_time.png ADDED
meta.yaml ADDED
@@ -0,0 +1,8 @@
+ espnet: '202310'
+ files:
+   model_file: exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/263epoch.pth
+ python: "3.9.16 (main, Mar 8 2023, 14:00:05) \n[GCC 11.2.0]"
+ timestamp: 1702989980.633902
+ torch: 1.13.1+cu117
+ yaml_files:
+   train_config: exp/svs_train_naive_rnn_dp_raw_phn_pyopenjtalk_jp/config.yaml
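meta.yaml records which checkpoint and training config belong to this upload. A minimal sketch of resolving those paths manually, assuming PyYAML and a local copy of the repository (for example from the download sketch earlier; `local_dir` is illustrative):

```python
# Sketch: resolve the checkpoint and training config recorded in meta.yaml.
import os
import yaml

local_dir = "."  # illustrative: path to a local copy of this repository

with open(os.path.join(local_dir, "meta.yaml")) as f:
    meta = yaml.safe_load(f)

model_file = os.path.join(local_dir, meta["files"]["model_file"])
train_config = os.path.join(local_dir, meta["yaml_files"]["train_config"])
print("checkpoint:", model_file)
print("training config:", train_config)
```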