# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
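"""Inference entry point for Amphion's text-to-audio (TTA) latent diffusion
models (currently AudioLDM): loads a config, selects CUDA when available,
builds the matching inference class, and synthesizes audio from text.

Example invocation (a sketch; the config and checkpoint paths below are
illustrative, not fixed by this script):

    python inference.py \
        --config egs/tta/audioldm/exp_config.json \
        --text "A dog barking in the distance" \
        --checkpoint_path ckpts/tta/audioldm/model.pt \
        --vocoder_path ckpts/vocoder/g_01250000 \
        --vocoder_config_path ckpts/vocoder/config.json \
        --output_dir output \
        --num_steps 200 \
        --guidance_scale 4.0
"""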
import argparse

import torch

from models.tta.ldm.audioldm_inference import AudioLDMInference
from utils.util import load_config


def build_inference(args, cfg):
    # Look up the inference class registered for the config's model_type.
    supported_inference = {
        "AudioLDM": AudioLDMInference,
    }

    inference_class = supported_inference[cfg.model_type]
    inference = inference_class(args, cfg)
    return inference


def build_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="JSON/YAML file for configurations.",
    )
    parser.add_argument(
        "--text",
        help="Text to be synthesized",
        type=str,
        default="Text to be synthesized.",
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        help="Checkpoint path of the model",
    )
    parser.add_argument(
        "--vocoder_path", type=str, help="Checkpoint path of the vocoder"
    )
    parser.add_argument(
        "--vocoder_config_path", type=str, help="Config path of the vocoder"
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Output dir for saving generated results",
    )
    parser.add_argument(
        "--num_steps",
        type=int,
        default=200,
        help="The total number of denoising steps",
    )
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=4.0,
        help="The scale of classifier-free guidance",
    )
    parser.add_argument("--local_rank", default=-1, type=int)
    return parser


def main():
    # Parse arguments
    args = build_parser().parse_args()
    # args, infer_type = formulate_parser(args)

    # Parse config
    cfg = load_config(args.config)

    # Note: despite its name, local_rank is repurposed here to carry the
    # torch.device that inference runs on.
    if torch.cuda.is_available():
        args.local_rank = torch.device("cuda")
    else:
        args.local_rank = torch.device("cpu")
    print("args: ", args)

    # Build inference
    inferencer = build_inference(args, cfg)

    # Run inference
    inferencer.inference()


if __name__ == "__main__":
    main()