# NOTE(review): removed non-Python extraction artifacts that were prepended to
# this file (file-size line, commit hash, and a flattened line-number index) —
# they are not part of the source and would break the interpreter.
import gradio as gr
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel
import numpy as np
# Load the trained multimodal Keras model from disk.
# NOTE(review): path is relative to the working directory — confirm deploy layout.
model = tf.keras.models.load_model('models/model_files')
# Load the tokenizer; it must match the text encoder the model was trained
# with (here: bert-base-uncased, cased text will be lowercased).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def preprocess_text(text):
    """Tokenize *text* into BERT-ready TensorFlow tensors.

    Returns the tokenizer's encoding dict (``input_ids``,
    ``attention_mask``, ...), padded and truncated to 512 tokens.
    """
    return tokenizer(
        text,
        return_tensors='tf',
        padding=True,
        truncation=True,
        max_length=512,
    )
def predict(text, image, structured):
    """Run the multimodal model on one (text, image, structured) sample.

    Parameters:
        text: raw input string, tokenized via ``preprocess_text``.
        image: image tensor/array; resized to 224x224 and passed through
            ResNet-50 preprocessing. (Assumes HWC layout — TODO confirm
            whether callers pass a single image or a batch.)
        structured: numeric array with ``.mean()``/``.std()`` methods
            (numpy array or pandas Series — presumably; verify caller).

    Returns:
        The scalar prediction ``prediction[0][0]`` from the model.
    """
    text_inputs = preprocess_text(text)
    # ResNet-50 expects 224x224 inputs with its own channel normalization.
    image = tf.image.resize(image, (224, 224))
    image = tf.keras.applications.resnet50.preprocess_input(image)
    # Standardize the structured features. Guard against a zero standard
    # deviation (constant features), which would otherwise divide by zero
    # and feed NaN/inf into the model.
    std = structured.std()
    structured = (structured - structured.mean()) / (std if std else 1.0)
    prediction = model.predict(
        [text_inputs['input_ids'], text_inputs['attention_mask'], image, structured]
    )
    return prediction[0][0]
# Chat handler: placeholder reply until a real dialogue model is wired in.
def chat_response(user_input):
    """Return a canned echo reply for *user_input*."""
    return "Model response to: {}".format(user_input)
# Code-execution handler for the "Run Code" panel.
def execute_code(code):
    """Execute *code* and return the value it bound to ``output``.

    SECURITY: ``exec`` on user-supplied text is arbitrary code execution.
    This handler is reachable from the web UI — only expose it behind
    trusted access or a proper sandbox.

    Returns:
        The snippet's ``output`` variable if it set one, the string
        ``"No output"`` if it did not, or an ``"Error: ..."`` message if
        execution raised (instead of crashing the Gradio handler).
    """
    exec_globals = {}
    try:
        exec(code, exec_globals)  # deliberate — see security note above
    except Exception as exc:
        # Surface the failure to the UI rather than raising out of the handler.
        return f"Error: {type(exc).__name__}: {exc}"
    return exec_globals.get("output", "No output")
# --- Gradio UI: chat panel and code-execution panel, side by side ---
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: simple chat interface.
        with gr.Column():
            chat_input = gr.Textbox(lines=2, placeholder="Enter your message here...")
            chat_output = gr.Textbox(lines=5, placeholder="Model response will appear here...")
            chat_button = gr.Button("Send")
        # Right column: arbitrary-code runner (see execute_code security note).
        with gr.Column():
            code_input = gr.Textbox(lines=10, placeholder="Enter your code here...")
            code_output = gr.Textbox(lines=5, placeholder="Code output will appear here...")
            code_button = gr.Button("Run Code")

    # Wire each button to its handler.
    chat_button.click(chat_response, inputs=chat_input, outputs=chat_output)
    code_button.click(execute_code, inputs=code_input, outputs=code_output)

# Start the local Gradio server (blocking call).
demo.launch()