Jacob Molnia committed · Commit 6f9cada · 0 parent(s)

Add files via upload

Files changed:
- README.md +12 -0
- app.py +222 -0
- requirements.txt +6 -0
- tests/test_app.py +47 -0
README.md
ADDED
@@ -0,0 +1,12 @@
+---
+title: CS553
+emoji: 🖼
+colorFrom: purple
+colorTo: red
+sdk: gradio
+sdk_version: 4.42.0
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,222 @@
+import gradio as gr
+from huggingface_hub import InferenceClient
+import torch
+from transformers import pipeline
+
+# Inference client setup
+client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
+pipe = pipeline("text-generation", "microsoft/Phi-3.5-mini-instruct", torch_dtype=torch.bfloat16, device_map="auto")
+
+# Global flag to handle cancellation
+stop_inference = False
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message="You are a friendly Chatbot. Your job is to assist users in emergencies, so reply quickly but accurately.",
+    max_tokens=2048,
+    temperature=0.7,
+    top_p=0.95,
+    use_local_model=False,
+):
+    global stop_inference
+    stop_inference = False  # Reset cancellation flag
+
+    # Initialize history if it's None
+    if history is None:
+        history = []
+
+    if use_local_model:
+        # Local inference
+        messages = [{"role": "system", "content": system_message}]
+        for val in history:
+            if val[0]:
+                messages.append({"role": "user", "content": val[0]})
+            if val[1]:
+                messages.append({"role": "assistant", "content": val[1]})
+        messages.append({"role": "user", "content": message})
+
+        response = ""
+        # Note: the text-generation pipeline returns completed sequences, so this
+        # loop yields the full reply in one step rather than token-by-token.
+        for output in pipe(
+            messages,
+            max_new_tokens=max_tokens,
+            temperature=temperature,
+            do_sample=True,
+            top_p=top_p,
+        ):
+            if stop_inference:
+                response = "Inference cancelled."
+                yield history + [(message, response)]
+                return
+            token = output['generated_text'][-1]['content']
+            response += token
+            yield history + [(message, response)]  # Yield history + new response
+
+    else:
+        # API-based inference
+        messages = [{"role": "system", "content": system_message}]
+        for val in history:
+            if val[0]:
+                messages.append({"role": "user", "content": val[0]})
+            if val[1]:
+                messages.append({"role": "assistant", "content": val[1]})
+        messages.append({"role": "user", "content": message})
+
+        response = ""
+        for message_chunk in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            if stop_inference:
+                response = "Inference cancelled."
+                yield history + [(message, response)]
+                return
+            token = message_chunk.choices[0].delta.content or ""  # delta.content can be None
+            response += token
+            yield history + [(message, response)]  # Yield history + new response
+
+
+def cancel_inference():
+    global stop_inference
+    stop_inference = True
+
+def clear_conversation():
+    return None
+
+# Custom CSS for an enhanced look
+custom_css = """
+body {
+    font-family: 'Roboto', sans-serif;
+    background-color: #f5f7fa;
+}
+#main-container {
+    max-width: 900px;
+    margin: 0 auto;
+    padding: 20px;
+    background: white;
+    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
+    border-radius: 15px;
+}
+.gradio-container {
+    margin-top: 20px;
+}
+.gr-button {
+    background-color: #4a90e2;
+    color: white;
+    border: none;
+    border-radius: 5px;
+    padding: 10px 20px;
+    cursor: pointer;
+    transition: all 0.3s ease;
+    font-weight: bold;
+}
+.gr-button:hover {
+    background-color: #357abd;
+    transform: translateY(-2px);
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+}
+.gr-button.secondary {
+    background-color: #f0f0f0;
+    color: #333;
+}
+.gr-button.secondary:hover {
+    background-color: #e0e0e0;
+}
+.gr-button.cancel {
+    background-color: #e74c3c;
+}
+.gr-button.cancel:hover {
+    background-color: #c0392b;
+}
+.gr-form {
+    border: 1px solid #e0e0e0;
+    padding: 15px;
+    border-radius: 10px;
+    background-color: #f9f9f9;
+}
+.gr-box {
+    border-radius: 8px;
+    border: 1px solid #e0e0e0;
+}
+.gr-padded {
+    padding: 15px;
+}
+.gr-chat {
+    font-size: 16px;
+    border: 1px solid #e0e0e0;
+    border-radius: 10px;
+    overflow: hidden;
+}
+.gr-chat .message {
+    padding: 10px 15px;
+    border-bottom: 1px solid #f0f0f0;
+}
+.gr-chat .user {
+    background-color: #e8f0fe;
+}
+.gr-chat .bot {
+    background-color: #ffffff;
+}
+#title {
+    text-align: center;
+    font-size: 2.5em;
+    margin-bottom: 20px;
+    color: #2c3e50;
+    text-shadow: 1px 1px 2px rgba(0,0,0,0.1);
+}
+"""
+
+# Define the interface
+with gr.Blocks(css=custom_css) as demo:
+    gr.Markdown("<h1 id='title'>🤖 EMERGENCY RESPONSE BOT 🚀</h1>")
+    gr.Markdown("Engage in a conversation with our AI chatbot using customizable settings. \n It's a demo bot for an emergency response system. \n NOTE: This bot was made for educational purposes only and should not be used in real emergencies.")
+
+    with gr.Row():
+        with gr.Column(scale=2):
+            chat_history = gr.Chatbot(label="Chat", height=500)
+            user_input = gr.Textbox(show_label=False, placeholder="Type your message here...", lines=2)
+            with gr.Row():
+                submit_button = gr.Button("Send", variant="primary")
+                cancel_button = gr.Button("Cancel", variant="stop")
+                clear_button = gr.Button("Clear Chat", variant="secondary")
+
+        with gr.Column(scale=1):
+            with gr.Accordion("Advanced Settings", open=False):
+                system_message = gr.Textbox(value="You are a friendly Chatbot.", label="System message", interactive=True)
+                use_local_model = gr.Checkbox(label="Use Local Model", value=False)
+                max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
+                temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+                top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
+
+            with gr.Accordion("Chat Information", open=True):
+                message_count = gr.Number(label="Messages in Conversation", value=0, interactive=False)
+                word_count = gr.Number(label="Total Words", value=0, interactive=False)
+
+    # Event handlers
+    submit_button.click(respond,
+                        [user_input, chat_history, system_message, max_tokens, temperature, top_p, use_local_model],
+                        [chat_history])
+    user_input.submit(respond,
+                      [user_input, chat_history, system_message, max_tokens, temperature, top_p, use_local_model],
+                      [chat_history])
+    cancel_button.click(cancel_inference)
+    clear_button.click(clear_conversation, outputs=[chat_history])
+
+    # Update chat information
+    def update_chat_info(history):
+        if history is None:
+            return 0, 0
+        message_count = len(history)
+        word_count = sum(len(msg[0].split()) + len(msg[1].split()) for msg in history)
+        return message_count, word_count
+
+    chat_history.change(update_chat_info, inputs=[chat_history], outputs=[message_count, word_count])
+
+if __name__ == "__main__":
+    demo.launch(share=False)  # share=True is not supported on HF Spaces
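A note on the cancellation design: `respond` streams by yielding partial chat histories, and the Cancel button simply flips the module-level `stop_inference` flag, which the generator polls between chunks. A minimal, self-contained sketch of the same pattern (hypothetical names, independent of the app code above):

    import threading
    import time

    stop_inference = False  # module-level flag, same pattern as app.py

    def generate():
        """Yield chunks until finished or until the flag is set."""
        global stop_inference
        stop_inference = False  # reset at the start of each run
        for chunk in ["Hello", ",", " world"]:
            if stop_inference:
                yield "Inference cancelled."
                return
            time.sleep(0.1)  # simulate generation latency
            yield chunk

    def cancel():
        global stop_inference
        stop_inference = True

    # Consume the stream on the main thread; cancel from a timer thread.
    threading.Timer(0.15, cancel).start()
    for chunk in generate():
        print(chunk)

One caveat of this design, visible in the sketch as well: the flag is shared process-wide, so with concurrent users one person's Cancel would stop every in-flight generation in the Space.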
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+huggingface_hub==0.23.*
+gradio==4.39.*
+torch==2.4.*
+transformers==4.43.*
+accelerate==0.33.*
+pytest==7.4.3
tests/test_app.py
ADDED
@@ -0,0 +1,47 @@
+import pytest
+import sys
+import os
+
+# Add the parent directory to the Python path
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from app import clear_conversation, update_chat_info, cancel_inference, respond, custom_css, demo
+
+def test_clear_conversation():
+    result = clear_conversation()
+    assert result is None
+
+def test_update_chat_info_empty():
+    history = None
+    message_count, word_count = update_chat_info(history)
+    assert message_count == 0
+    assert word_count == 0
+
+def test_cancel_inference():
+    cancel_inference()
+    from app import stop_inference
+    assert stop_inference is True
+
+def test_respond_input_types():
+    message = "Test message"
+    history = [("User", "Hello"), ("Bot", "Hi")]
+    system_message = "You are a test bot"
+    max_tokens = 100
+    temperature = 0.7
+    top_p = 0.9
+    use_local_model = False
+
+    generator = respond(message, history, system_message, max_tokens, temperature, top_p, use_local_model)
+
+    assert hasattr(generator, '__next__')  # Check that respond returns a generator
+
+def test_custom_css_exists():
+    assert isinstance(custom_css, str)
+    assert len(custom_css) > 0
+
+def test_demo_object_creation():
+    assert demo is not None
+    assert hasattr(demo, 'launch')
+
+if __name__ == "__main__":
+    pytest.main([__file__])
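For reference, `update_chat_info` counts one entry per (user, bot) history tuple and sums whitespace-split words across both turns. A standalone copy of its logic with a worked example (it mirrors the function in app.py, shown here without the Gradio imports so it runs on its own):

    def update_chat_info(history):
        if history is None:
            return 0, 0
        message_count = len(history)
        word_count = sum(len(u.split()) + len(b.split()) for u, b in history)
        return message_count, word_count

    # Two tuples; 2 + 1 + 3 + 2 = 8 words in total.
    assert update_chat_info([("hi there", "hello"), ("how are you", "fine thanks")]) == (2, 8)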