import librosa
import numpy as np

from .init import pipe
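
# NOTE: `pipe` is expected to be a Hugging Face `transformers`
# automatic-speech-recognition pipeline; the generate_kwargs /
# return_timestamps usage below matches a Whisper-style checkpoint.
# A rough sketch of what `.init` might provide (the model name is an
# illustrative assumption, not confirmed by this module):
#
#     from transformers import pipeline
#     pipe = pipeline(
#         "automatic-speech-recognition",
#         model="openai/whisper-small",
#         chunk_length_s=30,
#     )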

TASK = "transcribe"        # generation task for the ASR model (e.g. "transcribe" or "translate")
BATCH_SIZE = 8             # batch size handed to the pipeline
LIMIT = 60                 # maximum audio length to keep, in seconds
SAMPLING_RATE = 16000      # sample rate expected by the pipeline, in Hz


class A2T:
    def __init__(self, mic):
        self.mic = mic

    def __transcribe(self, inputs, task: str = None, lang: str = "english"):
        if inputs is None:
            raise ValueError("No audio was passed to the transcriber")

        # Run the ASR pipeline; generate_kwargs steers a Whisper-style model
        # toward the requested task and language. Only the "text" field of the
        # result is returned.
        transcribed_text = pipe(
            inputs,
            batch_size=BATCH_SIZE,
            generate_kwargs={"task": task, "language": lang},
            return_timestamps=True,
        )["text"]
        return transcribed_text

    def __preprocess(self, raw: np.ndarray, sampling_rate: int):
        # Convert 16-bit PCM samples to float32 in [-1.0, 1.0]; librosa only
        # accepts floating-point audio, so this must happen before resampling.
        chunk = raw.astype(np.float32, order='C') / 32768.0
        print(f"Chunk : {chunk} max chunk : {np.max(chunk)}")

        # Downmix multi-channel audio to mono.
        if chunk.ndim > 1:
            chunk = librosa.to_mono(chunk.T)

        # Resample to the rate the ASR pipeline expects.
        if sampling_rate != SAMPLING_RATE:
            chunk = librosa.resample(chunk, orig_sr=sampling_rate, target_sr=SAMPLING_RATE)
        print(f"Sampling rate : {SAMPLING_RATE} max chunk : {np.max(chunk)}")

        # Keep at most LIMIT seconds of audio.
        chunk = chunk[:SAMPLING_RATE * LIMIT]
        print(f"Chunk cut : {chunk} max chunk : {np.max(chunk)}")

        return chunk

    def predict(self):
        try:
            if self.mic is None:
                raise ValueError("Please provide audio")

            # Pull the raw 16-bit samples and the sample rate from the recording.
            raw = self.mic.get_array_of_samples()
            chunk = np.array(raw, dtype=np.int16)
            sampling_rate = self.mic.frame_rate
            audio = self.__preprocess(raw=chunk, sampling_rate=sampling_rate)
            print(
                f"audio : {audio} \n shape : {audio.shape} \n max : {np.max(audio)} \n "
                f"shape of chunk : {chunk.shape} \n sampling rate : {sampling_rate} \n "
                f"max chunk : {np.max(chunk)} \n chunk : {chunk}"
            )

            if not isinstance(audio, np.ndarray):
                raise TypeError("Preprocessed audio is not a NumPy array")

            return self.__transcribe(inputs=audio, task=TASK)

        except Exception as e:
            return f"Oops, some kind of error: {e}"
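

if __name__ == "__main__":
    # Minimal usage sketch, assuming `mic` is a pydub.AudioSegment, which
    # exposes the get_array_of_samples() and frame_rate attributes used in
    # predict(); "sample.wav" is a placeholder file name.
    from pydub import AudioSegment

    segment = AudioSegment.from_file("sample.wav")
    a2t = A2T(mic=segment)
    print(a2t.predict())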