import gradio as gr
from transformers import pipeline
import librosa
import numpy as np
import torch
# from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from transformers import AutoProcessor, AutoModelForCausalLM
# Note: only gradio and AutoProcessor/AutoModelForCausalLM are used by the active code;
# pipeline, librosa, numpy and torch support the commented-out captioner/TTS path below.

# checkpoint = "microsoft/speecht5_tts"
# tts_processor = SpeechT5Processor.from_pretrained(checkpoint)
# tts_model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
# vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# ic_processor = AutoProcessor.from_pretrained("microsoft/git-base")
# ic_model = AutoModelForCausalLM.from_pretrained("microsoft/git-base")

# Image-captioning (GIT) checkpoint used by the app
ic_processor = AutoProcessor.from_pretrained("ronniet/git-base-env")
ic_model = AutoModelForCausalLM.from_pretrained("ronniet/git-base-env")
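
# Optional sketch (assumption about the runtime, not part of the original app):
# move the model to GPU if the Space provides one; pixel_values in predict()
# would then also need .to(device).
# device = "cuda" if torch.cuda.is_available() else "cpu"
# ic_model = ic_model.to(device)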
# def tts(text):
#     if len(text.strip()) == 0:
#         return (16000, np.zeros(0).astype(np.int16))
#
#     inputs = tts_processor(text=text, return_tensors="pt")
#
#     # limit input length
#     input_ids = inputs["input_ids"]
#     input_ids = input_ids[..., :tts_model.config.max_text_positions]
#
#     speaker_embedding = np.load("cmu_us_bdl_arctic-wav-arctic_a0009.npy")
#     speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
#
#     speech = tts_model.generate_speech(input_ids, speaker_embedding, vocoder=vocoder)
#     speech = (speech.numpy() * 32767).astype(np.int16)
#
#     return (16000, speech)

# captioner = pipeline(model="microsoft/git-base")
# tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=False)
def predict(image):
    # text = captioner(image)[0]["generated_text"]
    # audio_output = "output.wav"
    # tts.tts_to_file(text, speaker=tts.speakers[0], language="en", file_path=audio_output)

    # Caption the input image with the GIT model
    pixel_values = ic_processor(images=image, return_tensors="pt").pixel_values
    text_ids = ic_model.generate(pixel_values=pixel_values, max_length=50)
    text = ic_processor.batch_decode(text_ids, skip_special_tokens=True)[0]

    # audio = tts(text)
    return text
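
# Quick local sanity check, kept commented out so it never runs on the Space
# (the file name "example.jpg" is hypothetical):
# from PIL import Image
# print(predict(Image.open("example.jpg")))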
# theme = gr.themes.Default(primary_hue="#002A5B")

demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil", label="Environment"),
    outputs=gr.Textbox(label="Caption"),
    css=".gradio-container {background-color: #002A5B}",
    theme=gr.themes.Soft(),
)

demo.launch()