monster119120 committed on
Commit
975abec
1 Parent(s): 0d402d0

Update app.py

Files changed (1)
  1. app.py +38 -34
app.py CHANGED
@@ -3,44 +3,48 @@ import os
  os.system('pip install -r requirements.txt')

  import streamlit as st
- from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
+ from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
  from datasets import load_dataset
  import torch
  import soundfile as sf
- from transformers import pipeline
  from PIL import Image
  import io

- st.title('Video to text and then text to speech app')
-
-
- image = st.file_uploader("Upload an image", type=["jpg", "png"])
-
- question = st.text_input(
-     label="Enter your question",
-     value="How many people and what is the color of this image?"
- )
-
- def generate_speech(text):
-     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
-     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+ # Define the function that generates speech
+ def generate_speech(text, model, processor, vocoder, speaker_embeddings):
      inputs = processor(text=text, return_tensors="pt")
-
-     embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-     speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
-
-     speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
-
-     sf.write("speech.wav", speech.numpy(), samplerate=16000)
-
- if st.button("Generate"):
-     image = Image.open(io.BytesIO(image.getvalue()))
-     vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
-     vqa_result = vqa_pipeline({"image": image, "question": question})
-     answer = vqa_result[0]['answer']
-     st.write(f"Question: {question} Answer: {answer}")  # Display the answer
-     generate_speech(f"Question: {question}, Answer: {answer}")
-     audio_file = open("speech.wav", 'rb')
-     audio_bytes = audio_file.read()
-     st.audio(audio_bytes, format="audio/wav")
+     generated_speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
+     sf.write("speech.wav", generated_speech.numpy(), samplerate=16000)
+     return "speech.wav"
+
+ # Initialize the model and processor
+ processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+ model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+ vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+ embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+ # Streamlit UI
+ st.title('Visual Question Answering and Text-to-Speech App')
+
+ uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png"])
+ default_question = "How many people and what is the color of this image?"
+ user_question = st.text_input("Enter your question", value=default_question)
+
+ if st.button("Answer and Generate Speech"):
+     if uploaded_image:
+         image = Image.open(io.BytesIO(uploaded_image.getvalue()))
+         vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
+         vqa_result = vqa_pipeline({"image": image, "question": user_question})
+         answer = vqa_result[0]['answer']  # Assume the answer is in the first result
+
+         display_text = f"Question: {user_question} Answer: {answer}"
+         st.write(display_text)  # Display the answer
+
+         # Generate and play speech
+         audio_path = generate_speech(display_text, model, processor, vocoder, speaker_embeddings)
+         audio_file = open(audio_path, 'rb')
+         audio_bytes = audio_file.read()
+         st.audio(audio_bytes, format="audio/wav")
+     else:
+         st.write("Please upload an image and enter a question.")