# Hugging Face Spaces page-scrape artifact (was: "Spaces: Running on Zero"),
# converted to a comment so the module parses.
import gradio as gr

from utilities import chat_with_interpreter, completion, process_file
def setup_gradio_interfaces():
    """Assemble the application's Gradio UIs and return them as one tabbed app.

    Returns:
        gr.TabbedInterface: four tabs — "AIで開発" (interpreter chat),
        "FineTuning" (chat with generation-parameter sliders), "Chat"
        (plain completion), and "仕様書から作成" (file-driven generation) —
        with request queuing enabled.
    """
    # NOTE(review): chat_interface and chat_interface2 are constructed but
    # never added to the returned tabs. Kept to preserve behavior — confirm
    # with callers whether they can be removed.
    chat_interface = gr.ChatInterface(
        fn=chat_with_interpreter,
        examples=["サンプルHTMLの作成", "google spreadの読み込み作成", "merhaba"],
        title="Auto Program",
        css=".chat-container { height: 1500px; }",
    )

    chat_interface2 = gr.ChatInterface(
        fn=chat_with_interpreter,
        examples=["こんにちは", "どうしたの?"],
        title="Auto Program 2",
    )
    chat_interface2.queue()

    # "FineTuning" tab: interpreter chat plus tunable generation parameters.
    demo4 = gr.ChatInterface(
        chat_with_interpreter,
        additional_inputs=[
            gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
    )

    # "仕様書から作成" tab: upload a spec file with notes and a target folder.
    democs = gr.Interface(
        fn=process_file,
        inputs=[
            "file",
            gr.Textbox(label="Additional Notes", lines=10),
            gr.Textbox(label="Folder Name"),
        ],
        outputs="text",
    )

    # "AIで開発" tab. BUG FIX: the original rebound `demo` to the inner
    # ChatInterface inside `with gr.Blocks(...) as demo:`, shadowing the very
    # Blocks being built. Render the ChatInterface inside the Blocks instead,
    # matching the `democ` tab below.
    with gr.Blocks(fill_height=True, css="") as demo:
        gr.ChatInterface(
            fn=chat_with_interpreter,
            chatbot=gr.Chatbot(height=650, placeholder="PLACEHOLDER", label="Gradio ChatInterface"),
            fill_height=True,
            additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
            additional_inputs=[
                gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
                gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False),
            ],
            examples=[
                ["HTMLのサンプルを作成して"],
                ["CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml"],
            ],
            cache_examples=False,
        )

    # "Chat" tab: plain completion endpoint with the same parameter controls.
    with gr.Blocks(fill_height=True, css="") as democ:
        gr.ChatInterface(
            fn=completion,
            chatbot=gr.Chatbot(height=450, placeholder="PLACEHOLDER", label="Gradio ChatInterface"),
            fill_height=True,
            additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
            additional_inputs=[
                gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
                gr.Slider(minimum=128, maximum=4096, step=1, value=512, label="Max new tokens", render=False),
            ],
            examples=[
                ["HTMLのサンプルを作成して"],
                ["CUDA_VISIBLE_DEVICES=0 llamafactory-cli train examples/lora_single_gpu/llama3_lora_sft.yaml"],
            ],
            cache_examples=False,
        )
        gr.Markdown("--- Built with Meta Llama 3")

    default_interfaces = [demo, demo4, democ, democs]
    default_names = ["AIで開発", "FineTuning", "Chat", "仕様書から作成"]
    tabs = gr.TabbedInterface(default_interfaces, default_names)
    tabs.queue()
    return tabs