# Hugging Face Space file header (scrape residue, kept as a comment so the file parses):
# Docfile — Update app.py — commit 7d9bce7 (verified) — 1.57 kB
import os

import gradio as gr

# The Gemini API key is supplied through the Space's TOKEN secret.
# Fail fast with a clear message instead of the opaque TypeError that
# `os.environ[...] = None` would raise when the secret is missing.
token = os.environ.get("TOKEN")
if not token:
    raise RuntimeError("TOKEN environment variable is not set; cannot configure GOOGLE_API_KEY")
os.environ["GOOGLE_API_KEY"] = token

# Disable every Gemini safety filter: pass-through for all harm categories.
safe = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
from llama_index.llms.gemini import Gemini
from llama_index.multi_modal_llms.gemini import GeminiMultiModal
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
# Multimodal model: handles prompts that include an image.
gemini_pro = GeminiMultiModal(model_name="models/gemini-pro-vision")
# Text-only model for prompts without an image.
llm = Gemini(model="models/gemini-pro")
# Module-level buffer for the last generated answer; written by
# generate_content and read (only at import time) by the markdown template below.
e =""
# Fonction pour générer le contenu
async def generate_content(pro,image):
global e
if not image:
response = await llm.acomplete(pro,safety_settings=safe)
print(response)
e = response.text
print(e)
else:
#response = model.generate_content([pro, image])
response_acomplete = await gemini_pro.acomplete(prompt=pro, image_documents=image,)
print(response_acomplete)
e = response_acomplete
return e
# Gradio UI: textbox + optional image in, Markdown (with $$...$$ LaTeX) out.
# The output pane is populated by generate_content's return value; the
# previous `markdown = r"""e""".format(e)` template had no {} placeholder,
# so it was a no-op that only displayed a literal "e" before the first run —
# it is removed and the Markdown component starts empty.
iface = gr.Interface(
    fn=generate_content,
    inputs=[gr.Textbox(), gr.Image(type="pil")],
    outputs=gr.Markdown(
        latex_delimiters=[{"left": "$$", "right": "$$", "display": True}]
    ),
)
iface.launch()