ankitdwivedi31 committed
Commit 9cc668b
1 Parent(s): 0833e31

Create app.py

Files changed (1)
  1. app.py +33 -0
app.py ADDED
@@ -0,0 +1,33 @@
+ import gradio as gr
+ from PIL import Image
+ from transformers import pipeline
+ import scipy.io.wavfile as wavfile
+ import numpy as np
+ # import torch
+
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # model_path = "C:/Users/ankitdwivedi/OneDrive - Adobe/Desktop/NLP Projects/Video to Text Summarization/Model/models--Salesforce--blip-image-captioning-large/snapshots/2227ac38c9f16105cb0412e7cab4759978a8fd90"
+ # caption_image = pipeline("image-to-text", model=model_path)
+ caption_image = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
+ # tts_model_path = "C:/Users/ankitdwivedi/OneDrive - Adobe/Desktop/NLP Projects/Video to Text Summarization/Model/models--kakao-enterprise--vits-ljs/snapshots/3bcb8321394f671bd948ebf0d086d694dda95464"
+ # narrator = pipeline("text-to-speech", model=tts_model_path)
+ narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
+
+ def generate_audio(text):
+     narrated = narrator(text)  # returns {"audio": ndarray, "sampling_rate": int}
+     audio_data = np.array(narrated["audio"][0])
+     sampling_rate = narrated["sampling_rate"]
+     wavfile.write("generated_audio.wav", rate=sampling_rate, data=audio_data)
+     return "generated_audio.wav"
+
+ def caption_my_image(pil_image):
+     semantics = caption_image(images=pil_image)[0]["generated_text"]
+     return generate_audio(semantics)
+
+ demo = gr.Interface(fn=caption_my_image,
+                     inputs=[gr.Image(label="Select Image", type="pil")],
+                     outputs=[gr.Audio(label="Generated Audio")],
+                     title="Project 8: Audio Caption Image",
+                     description="This application generates a spoken audio caption for the uploaded image.")
+ demo.launch()
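
For reviewers who want to exercise this commit without launching the Gradio UI, below is a minimal sketch of the same caption-to-speech chain run headlessly. The filenames sample.jpg and caption.wav are hypothetical stand-ins; the two model IDs are the ones app.py loads from the Hub.

    import numpy as np
    import scipy.io.wavfile as wavfile
    from PIL import Image
    from transformers import pipeline

    # Same two checkpoints as app.py above.
    caption_image = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
    narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")

    image = Image.open("sample.jpg")  # hypothetical local test image
    caption = caption_image(images=image)[0]["generated_text"]
    print("Caption:", caption)

    # The TTS pipeline returns a dict with "audio" (waveform) and "sampling_rate".
    speech = narrator(caption)
    wavfile.write("caption.wav", rate=speech["sampling_rate"], data=np.array(speech["audio"][0]))

Writing the waveform to disk with scipy mirrors what generate_audio() does in the app; Gradio's Audio component then simply plays back the resulting WAV file.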