import torch
import torchaudio

from fairseq import checkpoint_utils
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
from fairseq.data.data_utils import post_process
from examples.speech_to_text.data_utils import extract_fbank_features
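
# Single-utterance inference for a fairseq speech_to_text model: extract filterbank
# features from one WAV file and decode it with the averaged checkpoint (the paths
# below suggest a MuST-C-style en->hi speech translation setup).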
def main(audio_path, checkpoint_path):

    def extract_features(audio_path):
        # torchaudio.load returns a (channels, num_samples) float tensor in [-1, 1],
        # which is the input extract_fbank_features expects (mono audio assumed).
        waveform, sample_rate = torchaudio.load(audio_path)
        features = extract_fbank_features(waveform, sample_rate)
        return features

    # Log-mel filterbank features for the whole utterance.
    fbank_features = extract_features(audio_path)
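
    # fbank_features should now be a (num_frames, 80) float32 NumPy array; 80 mel bins
    # is the extract_fbank_features default and the usual speech_to_text configuration.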

    # Load the model, its training config, and the task from the checkpoint.
    models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = models[0]
    model.eval()

    # Apply the eval-time feature transforms (e.g. utterance or global CMVN) declared in
    # the task's data config, the same way SpeechToTextDataset does; they operate on the
    # NumPy features. The split name only matters for wildcard matching in config.yaml.
    feature_transform = CompositeAudioFeatureTransform.from_config_dict(
        task.data_cfg.get_feature_transforms("test", is_train=False)
    )
    if feature_transform is not None:
        fbank_features = feature_transform(fbank_features)

    fbank_tensor = torch.from_numpy(fbank_features).float()

    # Run on GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    fbank_tensor = fbank_tensor.to(device)

    # Batch of size 1 for the single utterance.
    sample = {
        "net_input": {
            "src_tokens": fbank_tensor.unsqueeze(0),
            "src_lengths": torch.tensor([fbank_tensor.size(0)], device=device),
        }
    }
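
    # The "src_tokens"/"src_lengths" keys mirror what SpeechToTextDataset.collater builds
    # for training and evaluation batches, which is the input layout S2T models expect.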

    # Build a beam-search generator from the stored generation config and decode.
    generator = task.build_generator(models, cfg.generation)
    with torch.no_grad():
        hypos = task.inference_step(generator, models, sample)

    # Best hypothesis for the only utterance in the batch.
    hypo_tokens = hypos[0][0]["tokens"].int().cpu()

    # The target dictionary lives on the task, not on the config; "sentencepiece"
    # strips the subword markers used by the usual MuST-C target vocabulary.
    hypo_str = task.target_dictionary.string(hypo_tokens)
    hypo_str = post_process(hypo_str, "sentencepiece")

    return hypo_str


if __name__ == "__main__":
    audio_file_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/test.wav"
    checkpoint_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/st_avg_last_10_checkpoints.pt"
    prediction = main(audio_file_path, checkpoint_path)
    print("Predicted text:", prediction)