import soundfile as sf
import torch

from fairseq import checkpoint_utils
from fairseq.data.data_utils import post_process
from examples.speech_to_text.data_utils import extract_fbank_features
def main(audio_path, checkpoint_path):
    # Load the audio file and compute log-Mel filterbank features, the input
    # representation used by the fairseq speech-to-text models
    def extract_features(audio_path):
        waveform, sample_rate = sf.read(audio_path)
        # extract_fbank_features expects a float tensor in (channels, samples) layout
        waveform = torch.from_numpy(waveform).float().unsqueeze(0)
        return extract_fbank_features(waveform, sample_rate)

    fbank_features = extract_features(audio_path)
    # Load the pre-trained model checkpoint together with its config and task
    models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = models[0]
    model.eval()
    # Convert the fbank features (a numpy array of shape (frames, mel bins))
    # to a float tensor; any feature normalization (e.g. utterance CMVN)
    # configured at training time is not re-applied here
    fbank_tensor = torch.from_numpy(fbank_features).float()

    # Move the model and the features to the same device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    fbank_tensor = fbank_tensor.to(device)

    # Wrap the features in the fairseq speech-to-text batch format:
    # a batch dimension plus the number of frames per utterance
    sample = {
        "net_input": {
            "src_tokens": fbank_tensor.unsqueeze(0),
            "src_lengths": torch.tensor([fbank_tensor.size(0)], device=device),
        }
    }
    # Build a sequence generator and run beam-search inference
    generator = task.build_generator([model], cfg.generation)
    with torch.no_grad():
        hypos = task.inference_step(generator, [model], sample)

    # Take the best (first) hypothesis of the single utterance in the batch
    hypo_tokens = hypos[0][0]["tokens"].int().cpu()

    # Convert the token ids to text with the target dictionary, then strip the
    # BPE markers (assumes a sentencepiece vocabulary, as in the MUST-C recipe)
    hypo_str = task.target_dictionary.string(hypo_tokens)
    hypo_str = post_process(hypo_str, "sentencepiece")
    return hypo_str
if __name__ == "__main__":
    audio_file_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/test.wav"
    checkpoint_path = "/content/drive/MyDrive/en2hi/fairseq_mustc_single_inference/st_avg_last_10_checkpoints.pt"
    prediction = main(audio_file_path, checkpoint_path)
    print("Predicted text:", prediction)