import datetime
import gc
import os
import time

import gradio as gr
import librosa
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

# Load the pretrained MMS model and the Amharic processor with language model
model = Wav2Vec2ForCTC.from_pretrained("facebook/mms-1b-all")
processor = Wav2Vec2ProcessorWithLM.from_pretrained("jlonsako/mms-1b-all-AmhLM")
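# Wav2Vec2ProcessorWithLM pairs the feature extractor and tokenizer with a
# pyctcdecode beam-search decoder (pyctcdecode must be installed), so
# batch_decode() below returns LM-rescored text rather than greedy CTC output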


# Define functions

# Convert seconds into an SBV timestamp (H:MM:SS.mmm); SBV uses a period
# before the milliseconds, and a comma separates the start and end times
def format_time(seconds):
    millis = int(round(seconds * 1000))
    hours, rem = divmod(millis, 3_600_000)
    minutes, rem = divmod(rem, 60_000)
    return f"{hours}:{minutes:02d}:{rem // 1000:02d}.{rem % 1000:03d}"
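# Example: format_time(95.0) -> "0:01:35.000"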

# Convert video/audio into a 16 kHz wav file (requires ffmpeg on PATH)
def preprocessAudio(audioFile):
    os.system(f'ffmpeg -y -i "{audioFile.name}" -ar 16000 ./audioToConvert.wav')

# Transcribe the uploaded file and write an .sbv subtitle file
def Transcribe(file):
    global model  # `model` is reassigned below when cast to half precision
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    start_time = time.time()
    model.load_adapter("amh")  # activate the Amharic adapter weights

    preprocessAudio(file)
    block_size = 30  # process the audio in 30-second chunks

    transcripts = []
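    # Stream non-overlapping 30 s blocks: frame_length == hop_length == 16000
    # samples (1 s at 16 kHz), block_length == 30 frames per block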
    stream = librosa.stream(
        "./audioToConvert.wav",
        block_length=block_size,
        frame_length=16000,
        hop_length=16000
    )

    if device != "cpu":
        model = model.half()  # half precision is only reliable on GPU
    model.to(device)
    print(f"Model loaded to {device}: entering transcription phase")

    # Timestamping state and subtitle output (UTF-8 for the Amharic text)
    encoding_start = 0
    sbv_file = open("subtitle.sbv", "w", encoding="utf-8")
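    # SBV entries look like "0:00:00.000,0:00:30.000" followed by the caption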

    for speech_segment in stream:
        # Downmix to mono if a stereo block slips through
        if len(speech_segment.shape) > 1:
            speech_segment = (speech_segment[:, 0] + speech_segment[:, 1]) / 2
        input_values = processor(speech_segment, sampling_rate=16_000, return_tensors="pt").input_values.to(device)
        if device != "cpu":
            input_values = input_values.half()  # match the model's precision
        with torch.no_grad():
            logits = model(input_values).logits
        # Decode in full precision on CPU; batch_decode runs the LM beam search
        transcription = processor.batch_decode(logits.float().cpu().numpy()).text
        transcripts.append(transcription[0])

        # Generate timestamps; the final chunk may be shorter than block_size
        encoding_end = encoding_start + len(speech_segment) / 16000
        formatted_start = format_time(encoding_start)
        formatted_end = format_time(encoding_end)

        # Write to the .sbv file
        sbv_file.write(f"{formatted_start},{formatted_end}\n")
        sbv_file.write(f"{transcription[0]}\n\n")
        encoding_start = encoding_end

        # Freeing up memory
        del input_values
        del logits
        del transcription
        torch.cuda.empty_cache()
        gc.collect()

    # Join all chunk transcripts into a single transcript
    transcript = ' '.join(transcripts)
    sbv_file.close()

    end_time = time.time()
    os.remove("./audioToConvert.wav")  # clean up the temporary wav file
    print(f"The script ran for {end_time - start_time:.1f} seconds.")
    return "./subtitle.sbv"
    
# Gradio front end: upload an audio/video file, download the generated .sbv
demo = gr.Interface(fn=Transcribe, inputs=gr.File(), outputs="file")
demo.launch()
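# Note: pass share=True to demo.launch() for a temporary public URL when
# running outside a hosted Space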