# NOTE(review): removed non-Python extraction artifacts that preceded the code
# (file-size header, commit-hash row, line-number gutter) — they would break parsing.
# import required packages
import google.generativeai as genai
import os
import PIL.Image
import gradio as gr
from gradio_multimodalchatbot import MultimodalChatbot
from gradio.data_classes import FileData
# Fetch credentials from environment variables (None if unset).
GG_API_KEY = os.environ.get('GG_API_KEY')  # Google Generative AI API key
oaiusr = os.environ.get("OAI_USR")  # basic-auth username for the Gradio app
oaipwd = os.environ.get("OAI_PWD")  # basic-auth password for the Gradio app
genai.configure(api_key=GG_API_KEY)
# Two model handles (currently the same underlying model): one used for
# text-only chat turns, one for prompts that include an uploaded image.
model = genai.GenerativeModel('gemini-1.5-flash-latest')
modelvis = genai.GenerativeModel('gemini-1.5-flash-latest')
def gemini(input, file, chatbot=None):
    """Send the user's message (and optional image) to Gemini; update the chat.

    Args:
        input: The user's text prompt.
        file: Optional Gradio upload object for an image (has a ``.name``
            path attribute), or None for a text-only turn.
        chatbot: Existing MultimodalChatbot history; each entry is a
            ``[user_msg, bot_msg]`` pair whose elements expose ``.text``.
            Defaults to a fresh empty history.

    Returns:
        Tuple of (updated chatbot history, "" to clear the textbox,
        None to reset the upload button).

    Raises:
        gr.Error: Any API or image-processing failure, surfaced to the UI.
    """
    # Fix: the original used the shared-mutable-default pitfall `chatbot=[]`,
    # which leaks history across calls that rely on the default.
    if chatbot is None:
        chatbot = []

    # Rebuild the conversation in the Gemini API's role/parts format,
    # then append the current prompt (handles empty history uniformly —
    # the original duplicated this append in both branches).
    messages = []
    for user, bot in chatbot:
        messages.extend([
            {'role': 'user', 'parts': [user.text]},
            {'role': 'model', 'parts': [bot.text]},
        ])
    messages.append({'role': 'user', 'parts': [input]})

    try:
        if file is not None:
            # Vision path: send only the current prompt plus the image.
            with PIL.Image.open(file.name) as img:
                response = modelvis.generate_content(
                    [{'role': 'user', 'parts': [input, img]}]
                )
            gemini_resp = response.text
            user_msg = {"text": input,
                        "files": [{"file": FileData(path=file.name)}]}
        else:
            # Text path: include the full history for conversational context.
            response = model.generate_content(messages)
            gemini_resp = response.text
            user_msg = {"text": input, "files": []}
        bot_msg = {"text": gemini_resp, "files": []}
        chatbot.append([user_msg, bot_msg])
    except Exception as e:
        print(f"An error occurred: {e}")
        # gr.Error expects a message string; chain the cause for debugging
        # (the original passed the exception object itself).
        raise gr.Error(str(e)) from e
    return chatbot, "", None
# Define the Gradio Blocks interface and wire up its events.
with gr.Blocks() as demo:
    # Centered page header.
    gr.HTML("<center><h2>✨Tomoniai's Gemini Chat✨</h2></center>")
    # Chat display that supports mixed text/image messages.
    multi = MultimodalChatbot(
        value=[],
        label="Gemini",
        height=400,
        show_copy_button=True,
        bubble_full_width=False,
        likeable=True,
    )
    with gr.Row():
        # Prompt textbox (wider) next to the image upload button.
        tb = gr.Textbox(show_label=False, scale=4)
        up = gr.UploadButton("Upload Image", file_types=["image"], scale=1)
    # On submit: run gemini(prompt, file, history) -> refresh the chat,
    # clear the textbox, and reset the upload button.
    tb.submit(gemini, [tb, up, multi], [multi, tb, up])
    # Chained .then() calls cycle the button label as upload feedback.
    up.upload(lambda: gr.UploadButton("Uploading Image..."), [], up) \
        .then(lambda: gr.UploadButton("Image Uploaded"), [], up) \
        .then(lambda: gr.UploadButton("Upload Image"), [], up)

# Launch behind HTTP basic auth read from the environment.
# Fix: removed the stray trailing "|" artifact that made this line a
# syntax error in the original source.
demo.queue().launch(auth=(oaiusr, oaipwd), show_api=False)