import gradio as gr
from huggingface_hub import InferenceClient
import time

# Initialize the inference client; the default model can be overridden per
# request via the model dropdown in the UI. Pass token="hf_..." here if the
# selected model requires authentication.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Custom CSS for styling; note that most of these classes only take effect on
# components that reference them via elem_classes / elem_id
custom_css = """
.container {
    max-width: 800px;
    margin: auto;
    padding: 20px;
}

.chat-message {
    padding: 15px;
    border-radius: 10px;
    margin-bottom: 10px;
}

.user-message {
    background-color: #e3f2fd;
}

.bot-message {
    background-color: #f5f5f5;
}

.controls-container {
    background-color: #ffffff;
    padding: 20px;
    border-radius: 10px;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    margin-top: 20px;
}
"""

def format_timestamp():
    # Current wall-clock time, e.g. "14:32:05"; available for stamping
    # messages, though respond() does not use it yet
    return time.strftime("%H:%M:%S")

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    model_name,
):
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the conversation from the (user, assistant) tuples that
    # gr.ChatInterface passes in as history
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    response = ""
    try:
        # Stream the reply token by token; model=model_name lets the
        # dropdown selection override the client's default model
        for chunk in client.chat_completion(
            messages,
            model=model_name,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            if token:  # the final streamed chunk's delta can be None
                response += token
                yield response
    except Exception as e:
        yield f"Error: {str(e)}"

def create_demo():
    # Theme configuration: font and radius_size are constructor arguments,
    # not .set() variables
    theme = gr.themes.Default(
        font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"],
        radius_size=gr.themes.sizes.radius_sm,
    )

    with gr.Blocks(theme=theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=3):
                gr.Markdown("# 🤖 Advanced Chat Interface")
        
        with gr.Row():
            with gr.Column(scale=3):
                chatbot = gr.ChatInterface(
                    respond,
                    additional_inputs=[
                        gr.Textbox(
                            value="You are a friendly and helpful AI assistant.",
                            label="System Message",
                            lines=2
                        ),
                        gr.Slider(
                            minimum=1,
                            maximum=2048,
                            value=512,
                            step=1,
                            label="Max Tokens",
                            info="Maximum number of tokens to generate"
                        ),
                        gr.Slider(
                            minimum=0.1,
                            maximum=4.0,
                            value=0.7,
                            step=0.1,
                            label="Temperature",
                            info="Higher values make output more random"
                        ),
                        gr.Slider(
                            minimum=0.1,
                            maximum=1.0,
                            value=0.95,
                            step=0.05,
                            label="Top-p (Nucleus Sampling)",
                            info="Controls diversity of generated text"
                        ),
                        gr.Dropdown(
                            # "other-model-1/2" are placeholder entries; swap in
                            # real Hub model IDs before deploying
                            choices=["HuggingFaceH4/zephyr-7b-beta", "other-model-1", "other-model-2"],
                            value="HuggingFaceH4/zephyr-7b-beta",
                            label="Model",
                            info="Select the model to use"
                        ),
                    ],
                    title="Advanced Chat Interface",
                    description="Chat with an AI assistant powered by Hugging Face models.",
                )
            
            with gr.Column(scale=1):
                with gr.Accordion("Chat Information", open=True):
                    total_messages = gr.Number(value=0, label="Total Messages", interactive=False)
                    chat_duration = gr.Number(value=0, label="Chat Duration (min)", interactive=False)
                    
                with gr.Accordion("Advanced Settings", open=False):
                    gr.Checkbox(label="Enable Debug Mode", value=False)
                    gr.Checkbox(label="Save Chat History", value=True)
                    gr.Radio(
                        choices=["Simple", "Detailed"],
                        value="Simple",
                        label="Response Mode"
                    )

    return demo

if __name__ == "__main__":
    demo = create_demo()
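    # share=True creates a temporary public gradio.live link; set share=False
    # to keep the app local-only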
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)