import torch
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import IPython.display as ipd
import gradio as gr

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# The model was trained on 16 kHz SpeechCommands clips resampled to 8 kHz.
sample_rate = 16000
new_sample_rate = 8000
transform = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=new_sample_rate)

# The 35 SpeechCommands labels, sorted alphabetically. This is assumed to
# match the label order used when the checkpoint loaded below was trained.
labels = [
    "backward", "bed", "bird", "cat", "dog", "down", "eight", "five",
    "follow", "forward", "four", "go", "happy", "house", "learn", "left",
    "marvin", "nine", "no", "off", "on", "one", "right", "seven", "sheila",
    "six", "stop", "three", "tree", "two", "up", "visual", "wow", "yes",
    "zero",
]


class M5(nn.Module):
    """M5 convolutional network for classifying raw waveforms."""

    def __init__(self, n_input=1, n_output=35, stride=16, n_channel=32):
        super().__init__()
        self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
        self.bn1 = nn.BatchNorm1d(n_channel)
        self.pool1 = nn.MaxPool1d(4)
        self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
        self.bn2 = nn.BatchNorm1d(n_channel)
        self.pool2 = nn.MaxPool1d(4)
        self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
        self.bn3 = nn.BatchNorm1d(2 * n_channel)
        self.pool3 = nn.MaxPool1d(4)
        self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
        self.bn4 = nn.BatchNorm1d(2 * n_channel)
        self.pool4 = nn.MaxPool1d(4)
        self.fc1 = nn.Linear(2 * n_channel, n_output)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(self.bn2(x))
        x = self.pool2(x)
        x = self.conv3(x)
        x = F.relu(self.bn3(x))
        x = self.pool3(x)
        x = self.conv4(x)
        x = F.relu(self.bn4(x))
        x = self.pool4(x)
        x = F.avg_pool1d(x, x.shape[-1])  # global average pool over time
        x = x.permute(0, 2, 1)
        x = self.fc1(x)
        return F.log_softmax(x, dim=2)


def get_likely_index(tensor):
    # Find the most likely label index for each element in the batch.
    return tensor.argmax(dim=-1)


def index_to_label(index):
    # Return the word corresponding to the index in labels.
    # This is the inverse of label_to_index.
    return labels[index]


def predict(filepath):
    # Load the clip, resample it, and use the model to predict its label.
    waveform, _ = torchaudio.load(filepath)
    waveform = transform(waveform.to(device))
    with torch.no_grad():
        output = model(waveform.unsqueeze(0))
    index = get_likely_index(output)
    return index_to_label(index.squeeze())


def record(seconds=1):
    # Record audio from the browser microphone (Google Colab only).
    from google.colab import output as colab_output
    from base64 import b64decode
    from io import BytesIO
    from pydub import AudioSegment

    RECORD = (
        b"const sleep = time => new Promise(resolve => setTimeout(resolve, time))\n"
        b"const b2text = blob => new Promise(resolve => {\n"
        b"  const reader = new FileReader()\n"
        b"  reader.onloadend = e => resolve(e.srcElement.result)\n"
        b"  reader.readAsDataURL(blob)\n"
        b"})\n"
        b"var record = time => new Promise(async resolve => {\n"
        b"  stream = await navigator.mediaDevices.getUserMedia({ audio: true })\n"
        b"  recorder = new MediaRecorder(stream)\n"
        b"  chunks = []\n"
        b"  recorder.ondataavailable = e => chunks.push(e.data)\n"
        b"  recorder.start()\n"
        b"  await sleep(time)\n"
        b"  recorder.onstop = async ()=>{\n"
        b"    blob = new Blob(chunks)\n"
        b"    text = await b2text(blob)\n"
        b"    resolve(text)\n"
        b"  }\n"
        b"  recorder.stop()\n"
        b"})"
    )
    RECORD = RECORD.decode("ascii")
    print(f"Recording started for {seconds} seconds.")
    ipd.display(ipd.Javascript(RECORD))
    s = colab_output.eval_js("record(%d)" % (seconds * 1000))
    print("Recording ended.")
    b = b64decode(s.split(",")[1])
    fileformat = "wav"
    filename = f"_audio.{fileformat}"
    AudioSegment.from_file(BytesIO(b)).export(filename, format=fileformat)
    return torchaudio.load(filename)


# Load the pickled model saved earlier with torch.save(model, "export.pkl"),
# then move it (and the resampler) to the active device for inference.
model = torch.load("export.pkl", map_location=torch.device("cpu"))
model = model.to(device)
model.eval()
transform = transform.to(device)

# Legacy (pre-3.0) Gradio API: record from the microphone and hand the clip
# to predict() as a file path. predict() returns a single label string;
# Label would show the top three classes only if given a
# {label: probability} dict instead.
gr.Interface(
    fn=predict,
    inputs=gr.inputs.Audio(source="microphone", type="filepath"),
    outputs=gr.outputs.Label(num_top_classes=3),
).launch(share=True)
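
# Optional sanity check without the Gradio UI (a sketch; works only in Colab,
# since record() depends on google.colab): capture one second of audio, then
# classify the "_audio.wav" file that record() writes to disk.
#
# record(seconds=1)
# print(predict("_audio.wav"))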