import streamlit as st
# import openai
import replicate
import os
from dotenv import load_dotenv
from streamlit_extras.stylable_container import stylable_container
import streamlit_extras
# Load environment variables from a local .env file (e.g. REPLICATE_API_TOKEN).
load_dotenv()
REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
# NOTE(review): this rebinds the imported `replicate` MODULE name to a Client
# instance. Later code can still call `replicate.run(...)` because Client
# exposes .run(), but the module itself is no longer reachable under this
# name — consider renaming the client (e.g. `replicate_client`) file-wide.
replicate = replicate.Client(api_token=REPLICATE_API_TOKEN)
# Page-wide CSS injected via st.markdown further down (currently empty).
streamlit_style = """
"""
def page7():
    """Render the "Image to Video" page.

    Lets the user upload an image (jpeg/jpg/png) and enter a text prompt,
    then generates a short video from the image via Replicate's
    ``ali-vilab/i2vgen-xl`` model and displays it next to the upload.

    Side effects only (Streamlit UI + one Replicate API call); returns None.
    """
    with stylable_container(
        key="title",
        # Three selectors all center the title text and zero its padding:
        # the markdown <span>, Streamlit's emotion cache class, and the
        # generated container class. Class names are version-dependent —
        # presumably matched against a specific Streamlit build; verify
        # after upgrading Streamlit.
        css_styles=[
            """ span {
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }""",
            """
            st-emotion-cache-0{
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }""",
            """
            .e1f1d6gn0{
                text-align: center;
                padding-top: 0px;
                padding-right: 0px;
                padding-bottom: 0px;
                padding-left: 0px;
            }
            """,
        ],
    ):
        # FIX: the original had an unterminated single-line string literal
        # spanning three source lines (a SyntaxError). Reconstructed as a
        # single markdown call; this text is styled by the container CSS
        # above. NOTE(review): the original may have wrapped the text in an
        # HTML tag — confirm the intended markup.
        st.markdown("Image to Video", unsafe_allow_html=True)

    # Inject the (currently empty) page-wide style block.
    st.markdown(streamlit_style, unsafe_allow_html=True)

    image_file = st.file_uploader("Select Image", type=['jpeg', 'jpg', 'png'])
    if image_file is not None:
        # Two columns: the upload preview on the left, the generated video
        # on the right (filled in only after a successful generation).
        placeholder = st.empty()
        col1, col2 = placeholder.columns(2)
        col1.text("Uploaded Image")
        col1.image(image_file)

    prompt = st.text_input(label='Enter text prompt for Video generation')
    submit_button = st.button(label='Generate Video')

    if submit_button:
        if prompt and (image_file is not None):
            # Video generation is slow; keep the user informed while the
            # blocking Replicate call runs.
            with st.spinner("Generating Video. It may require few minutes so please wait...."):
                output = replicate.run(
                    "ali-vilab/i2vgen-xl:5821a338d00033abaaba89080a17eb8783d9a17ed710a6b4246a18e0900ccad4",
                    input={
                        "image": image_file,
                        "prompt": prompt,
                        "max_frames": 25,
                        "guidance_scale": 9,
                        "num_inference_steps": 50,
                    },
                )
            # col2 exists here because image_file is not None on this rerun.
            col2.text("Generated Video from Image")
            col2.video(output)
        else:
            # FIX: the original silently ignored a click when the image or
            # prompt was missing; tell the user what is needed instead.
            st.warning("Please upload an image and enter a prompt before generating a video.")

    # Trailing (empty) style injection, kept from the original.
    st.markdown(
        """
        """,
        unsafe_allow_html=True,
    )