# LenooxAi-D1 — app.py
# Hugging Face Space by OminduAnjana (commit f084620).
import gradio as gr
from huggingface_hub import InferenceClient
import time
# Initialize the client
# Module-level Inference API client, pinned to zephyr-7b-beta.
# NOTE(review): the UI also offers a model dropdown (see create_demo), but
# this global is constructed once for the default model — confirm whether
# per-request model selection is intended.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Custom CSS for better styling
# Injected into the app via `gr.Blocks(css=custom_css)` in create_demo().
# NOTE(review): .chat-message/.user-message/.bot-message are not attached to
# any component via elem_classes in this file — presumably they target
# Gradio-generated markup; verify the selectors still match.
custom_css = """
.container {
max-width: 800px;
margin: auto;
padding: 20px;
}
.chat-message {
padding: 15px;
border-radius: 10px;
margin-bottom: 10px;
}
.user-message {
background-color: #e3f2fd;
}
.bot-message {
background-color: #f5f5f5;
}
.controls-container {
background-color: #ffffff;
padding: 20px;
border-radius: 10px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
margin-top: 20px;
}
"""
def format_timestamp():
    """Return the current local time formatted as ``HH:MM:SS``."""
    now = time.localtime()
    return f"{now.tm_hour:02d}:{now.tm_min:02d}:{now.tm_sec:02d}"
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    model_name,
):
    """Stream an assistant reply for *message* given the prior *history*.

    Yields the accumulated response text after each streamed token, so the
    Gradio ChatInterface can render it incrementally.

    Args:
        message: The new user message.
        history: Prior (user, assistant) turn pairs; empty strings/None are skipped.
        system_message: System prompt prepended to the conversation.
        max_tokens: Generation cap forwarded to the API.
        temperature: Sampling temperature forwarded to the API.
        top_p: Nucleus-sampling parameter forwarded to the API.
        model_name: Model repo id selected in the UI dropdown.

    Yields:
        The response text accumulated so far, or a single "Error: ..." string
        if the API call fails.
    """
    # BUG FIX: `model_name` was previously ignored and the module-level
    # `client` (pinned to zephyr-7b-beta) was always used. Reuse the global
    # client for the default model; otherwise build one for the selection.
    active_client = (
        client
        if model_name == "HuggingFaceH4/zephyr-7b-beta"
        else InferenceClient(model_name)
    )

    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        # NOTE: loop variable renamed from `message` — it used to shadow the
        # `message` parameter.
        for chunk in active_client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            # Delta content can be None on some stream events (e.g. role-only
            # or final chunks); guard against `str + None` TypeError.
            if token:
                response += token
                yield response
    except Exception as e:
        # Surface the failure in the chat window instead of crashing the app.
        yield f"Error: {str(e)}"
def create_demo():
    """Build and return the chat UI as a Gradio Blocks app.

    Layout: a title row, then a main row with the ChatInterface (left, 3/4
    width) and an info/settings sidebar (right, 1/4 width).

    Returns:
        gr.Blocks: The assembled (un-launched) demo.
    """
    # Theme configuration.
    # BUG FIX: `font` and `radius_size` are constructor arguments of the
    # theme classes; `Base.set()` accepts only fine-grained CSS-variable
    # overrides and rejects unknown keyword arguments, so passing them to
    # `.set()` fails at startup.
    theme = gr.themes.Default(
        font=["Inter", "ui-sans-serif", "system-ui"],
        radius_size=gr.themes.sizes.radius_sm,
    )
    with gr.Blocks(theme=theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=3):
                gr.Markdown("# 🤖 Advanced Chat Interface")
        with gr.Row():
            with gr.Column(scale=3):
                # The additional inputs are passed to `respond` positionally,
                # after (message, history) — order here must match its
                # signature: system_message, max_tokens, temperature, top_p,
                # model_name.
                gr.ChatInterface(
                    respond,
                    additional_inputs=[
                        gr.Textbox(
                            value="You are a friendly and helpful AI assistant.",
                            label="System Message",
                            lines=2
                        ),
                        gr.Slider(
                            minimum=1,
                            maximum=2048,
                            value=512,
                            step=1,
                            label="Max Tokens",
                            info="Maximum number of tokens to generate"
                        ),
                        gr.Slider(
                            minimum=0.1,
                            maximum=4.0,
                            value=0.7,
                            step=0.1,
                            label="Temperature",
                            info="Higher values make output more random"
                        ),
                        gr.Slider(
                            minimum=0.1,
                            maximum=1.0,
                            value=0.95,
                            step=0.05,
                            label="Top-p (Nucleus Sampling)",
                            info="Controls diversity of generated text"
                        ),
                        gr.Dropdown(
                            choices=["HuggingFaceH4/zephyr-7b-beta", "other-model-1", "other-model-2"],
                            value="HuggingFaceH4/zephyr-7b-beta",
                            label="Model",
                            info="Select the model to use"
                        ),
                    ],
                    title="Advanced Chat Interface",
                    description="Chat with an AI assistant powered by Hugging Face models.",
                )
            with gr.Column(scale=1):
                # NOTE(review): the stats and settings below are static
                # placeholders — no callback in this file ever updates or
                # reads them.
                with gr.Accordion("Chat Information", open=True):
                    total_messages = gr.Number(value=0, label="Total Messages", interactive=False)
                    chat_duration = gr.Number(value=0, label="Chat Duration (min)", interactive=False)
                with gr.Accordion("Advanced Settings", open=False):
                    gr.Checkbox(label="Enable Debug Mode", value=False)
                    gr.Checkbox(label="Save Chat History", value=True)
                    gr.Radio(
                        choices=["Simple", "Detailed"],
                        value="Simple",
                        label="Response Mode"
                    )
    return demo
if __name__ == "__main__":
    demo = create_demo()
    # Bind on all interfaces at 7860 (Gradio's default port, also the port
    # Hugging Face Spaces containers expose); `share=True` additionally
    # requests a public gradio.live tunnel when run outside Spaces.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)