|
|
|
|
|
|
|
|
|
|
|
|
|
# Resolve the directory containing this script and the repository root
# (three levels up), then export them for the Python entry points below.
exp_dir=$(cd "$(dirname "$0")" && pwd)

work_dir=$(dirname "$(dirname "$(dirname "$exp_dir")")")

export WORK_DIR=$work_dir

export PYTHONPATH=$work_dir

export PYTHONIOENCODING=UTF-8

# Build the monotonic_align Cython extension in place.  Guard the cd so a
# missing directory aborts instead of building in the wrong location.
cd "$work_dir"/modules/monotonic_align || exit 1

mkdir -p monotonic_align

python setup.py build_ext --inplace

cd "$work_dir" || exit 1
|
|
|
|
|
# Parse command-line options.  Note the short spec must be "c:n:s:" — the
# original "c:n:s" declared -s as a flag, yet its handler shifts an argument,
# so "-s 2" silently dropped the stage value.  Abort if getopt rejects input.
options=$(getopt -o c:n:s: --long gpu:,config:,infer_expt_dir:,ar_model_ckpt_dir:,infer_output_dir:,infer_mode:,infer_test_list_file:,infer_text:,infer_text_prompt:,infer_audio_prompt:,model_train_stage:,name:,stage:,resume:,resume_from_ckpt_path:,resume_type: -- "$@") || exit 1

eval set -- "$options"

while true; do
  case "$1" in
    -c | --config) shift; exp_config=$1; shift ;;
    -n | --name) shift; exp_name=$1; shift ;;
    -s | --stage) shift; running_stage=$1; shift ;;
    --gpu) shift; gpu=$1; shift ;;
    --model_train_stage) shift; model_train_stage=$1; shift ;;
    --ar_model_ckpt_dir) shift; ar_model_ckpt_dir=$1; shift ;;
    --infer_expt_dir) shift; infer_expt_dir=$1; shift ;;
    --infer_output_dir) shift; infer_output_dir=$1; shift ;;
    --infer_mode) shift; infer_mode=$1; shift ;;
    --infer_test_list_file) shift; infer_test_list_file=$1; shift ;;
    --infer_text) shift; infer_text=$1; shift ;;
    --infer_text_prompt) shift; infer_text_prompt=$1; shift ;;
    --infer_audio_prompt) shift; infer_audio_prompt=$1; shift ;;
    --resume) shift; resume=$1; shift ;;
    --resume_from_ckpt_path) shift; resume_from_ckpt_path=$1; shift ;;
    --resume_type) shift; resume_type=$1; shift ;;
    --) shift; break ;;
    # The original lacked ';' before "exit 1", making it an echo argument,
    # so invalid options printed "Invalid option: ... exit 1" and continued.
    *) echo "Invalid option: $1"; exit 1 ;;
  esac
done
|
|
|
|
|
|
|
# The running stage is mandatory; everything below dispatches on it.
if [ -z "${running_stage:-}" ]; then
  echo "[Error] Please specify the running stage"
  exit 1
fi

# Default the config to the per-experiment exp_config.json next to this script.
if [ -z "${exp_config:-}" ]; then
  exp_config="${exp_dir}"/exp_config.json
fi

echo "Experimental Configuration File: $exp_config"

# Default to GPU 0 when --gpu was not given.
if [ -z "${gpu:-}" ]; then
  gpu="0"
fi
|
|
|
|
|
# Stage 1: feature/data preprocessing.
if [ "$running_stage" -eq 1 ]; then
  CUDA_VISIBLE_DEVICES=$gpu python "${work_dir}"/bins/tts/preprocess.py \
    --config="$exp_config" \
    --num_workers=4
fi
|
|
|
|
|
# Stage 2: model training (two internal train stages: 1 = AR, 2 = NAR,
# where stage 2 resumes from the stage-1 AR checkpoint).
if [ "$running_stage" -eq 2 ]; then
  if [ -z "${exp_name:-}" ]; then
    echo "[Error] Please specify the experiments name"
    exit 1
  fi

  # Train stage 2 needs the checkpoint produced by train stage 1.
  if [ "${model_train_stage:-}" = "2" ] && [ -z "${ar_model_ckpt_dir:-}" ]; then
    echo "[Error] Please specify the checkpoint path to the trained model in stage1."
    exit 1
  fi

  # Stage 1 has no prior checkpoint; pass the sentinel "None" downstream.
  if [ "${model_train_stage:-}" = "1" ]; then
    ar_model_ckpt_dir=None
  fi

  echo "Experimental Name: $exp_name"

  if [ -z "${resume_from_ckpt_path:-}" ]; then
    resume_from_ckpt_path=""
  fi

  if [ -z "${resume_type:-}" ]; then
    resume_type="resume"
  fi

  if [ "${resume:-}" = true ]; then
    echo "Resume from the existing experiment..."
    CUDA_VISIBLE_DEVICES=$gpu accelerate launch --main_process_port 29510 \
      "${work_dir}"/bins/tts/train.py \
      --config "$exp_config" \
      --exp_name "$exp_name" \
      --log_level debug \
      --train_stage "$model_train_stage" \
      --ar_model_ckpt_dir "$ar_model_ckpt_dir" \
      --resume \
      --checkpoint_path "$resume_from_ckpt_path" \
      --resume_type "$resume_type"
  else
    echo "Start a new experiment..."
    CUDA_VISIBLE_DEVICES=$gpu accelerate launch --main_process_port 29510 \
      "${work_dir}"/bins/tts/train.py \
      --config "$exp_config" \
      --exp_name "$exp_name" \
      --log_level debug \
      --train_stage "$model_train_stage" \
      --ar_model_ckpt_dir "$ar_model_ckpt_dir"
  fi
fi
|
|
|
|
|
|
|
# Stage 3: inference ("batch" reads a test list file; "single" synthesizes
# one utterance from --infer_text with text/audio prompts).
if [ "$running_stage" -eq 3 ]; then
  if [ -z "${infer_expt_dir:-}" ]; then
    echo "[Error] Please specify the experimental directory. The value is like [Your path to save logs and checkpoints]/[YourExptName]"
    exit 1
  fi

  # Default the output dir under the experiment dir.  The original used the
  # undefined $expt_dir here, silently defaulting the output to "/result".
  if [ -z "${infer_output_dir:-}" ]; then
    infer_output_dir="$infer_expt_dir/result"
  fi

  # Single quotes keep the literal "batch"/"single" quotes in the message;
  # the original's nested double quotes collapsed and dropped them.
  if [ -z "${infer_mode:-}" ]; then
    echo '[Error] Please specify the inference mode, e.g., "batch", "single"'
    exit 1
  fi

  if [ "$infer_mode" = "batch" ] && [ -z "${infer_test_list_file:-}" ]; then
    echo "[Error] Please specify the test list file used in inference when the inference mode is batch"
    exit 1
  fi

  if [ "$infer_mode" = "single" ] && [ -z "${infer_text:-}" ]; then
    echo "[Error] Please specify the text to be synthesized when the inference mode is single"
    exit 1
  fi

  # Normalize mode-specific arguments: each mode blanks out the other's
  # inputs so inference.py always receives every flag.
  if [ "$infer_mode" = "single" ]; then
    echo 'Text: ' "${infer_text}"
    infer_test_list_file=None
  elif [ "$infer_mode" = "batch" ]; then
    infer_text=""
    infer_text_prompt=""
    infer_audio_prompt=""
  fi

  # Quote every value (prompts/text may contain spaces); the original had
  # "$infer_audio_prompt\" with no space before the continuation backslash
  # and a dangling trailing backslash after the last flag.
  CUDA_VISIBLE_DEVICES=$gpu accelerate launch "$work_dir"/bins/tts/inference.py \
    --config "$exp_config" \
    --log_level debug \
    --acoustics_dir "$infer_expt_dir" \
    --output_dir "$infer_output_dir" \
    --mode "$infer_mode" \
    --text "$infer_text" \
    --text_prompt "$infer_text_prompt" \
    --audio_prompt "$infer_audio_prompt" \
    --test_list_file "$infer_test_list_file"
fi
|
|