Spaces: alex buz committed
Commit • e6868fd
Parent(s): a7368c8
final
Files changed:
- .gitignore +2 -1
- 2t.py +28 -0
- 3t_dropdown copy 2.py +100 -0
- 3t_dropdown copy 3.py +179 -0
- 3t_dropdown copy.py +34 -0
- 3t_dropdown.py +159 -0
- 3t_dropdown_CHAT.py +152 -0
- app copy 2.py +67 -0
- app copy 3.py +26 -0
- app copy 4.py +43 -0
- app copy 5.py +100 -0
- app copy 6_blocked_scroll.py +102 -0
- app copy 6_record_chatbot.py +53 -0
- app copy 6_working.py +102 -0
- app copy _6_working.py +101 -0
- app.py +72 -36
- app_Chat_working.py +53 -0
.gitignore
CHANGED
@@ -1 +1,2 @@
-cache
+cache
+.env
2t.py
ADDED
@@ -0,0 +1,28 @@
+from openai import OpenAI
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+    print(history_openai_format)
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        #print(chunk)
+        if chunk.choices[0].delta.content:
+            #print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+
+for msg in predict('Hi', [], 'sk-proj-FuXWodUyfcvTYcc6K9ekT3BlbkFJ2eV3qu7UZOAWlW2eWRLQ'):
+    print(msg)
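Note: 2t.py hardcodes an API key in its smoke test, while this same commit adds `.env` to .gitignore. A minimal sketch of loading the key from the environment instead (this assumes an `OPENAI_API_KEY` variable; `python-dotenv` is an assumed extra dependency, not part of this repo):

import os
from dotenv import load_dotenv  # assumed dependency, not in this repo

load_dotenv()  # populate os.environ from a local .env file, if present
for msg in predict('Hi', [], os.environ["OPENAI_API_KEY"]):
    print(msg)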
3t_dropdown copy 2.py
ADDED
@@ -0,0 +1,100 @@
+import gradio as gr
+from openai import OpenAI
+import time
+import html
+
+def predict(message, history, character, api_key, progress=gr.Progress()):
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in progress.tqdm(response, desc="Generating"):
+        if chunk.choices[0].delta.content:
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+            time.sleep(0.01)
+
+def format_history(history):
+    html_content = ""
+    for human, ai in history:
+        human_formatted = html.escape(human).replace('\n', '<br>')
+        html_content += f'<div class="message user-message"><strong>You:</strong> {human_formatted}</div>'
+        if ai:
+            ai_formatted = html.escape(ai).replace('\n', '<br>')
+            html_content += f'<div class="message ai-message"><strong>AI:</strong> {ai_formatted}</div>'
+    return html_content
+
+css = """
+#chat-display {
+    height: 600px;
+    overflow-y: auto;
+    border: 1px solid #ccc;
+    padding: 10px;
+    margin-bottom: 10px;
+}
+.message {
+    margin-bottom: 10px;
+    word-wrap: break-word;
+}
+.user-message {
+    background-color: #e6f3ff;
+    padding: 5px;
+    border-radius: 5px;
+}
+.ai-message {
+    background-color: #f0f0f0;
+    padding: 5px;
+    border-radius: 5px;
+}
+"""
+
+def user(user_message, history, character, api_key):
+    if user_message.strip() == "":
+        # user() is a generator, so yield (not return) the unchanged state
+        yield "", history, format_history(history)
+        return
+    history.append([user_message, None])
+    formatted_history = format_history(history)
+
+    # Start bot response generation
+    bot_message_generator = predict(user_message, history[:-1], character, api_key)
+    for chunk in bot_message_generator:
+        history[-1][1] = chunk
+        formatted_history = format_history(history)
+        yield "", history, formatted_history
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>My Chatbot</h1>")
+
+    chat_history = gr.State([])
+    chat_display = gr.HTML(elem_id="chat-display")
+    msg = gr.Textbox(
+        label="Your message",
+        lines=1,
+        placeholder="Type your message here... (Press Enter to send)"
+    )
+    clear = gr.Button("Clear")
+
+    dropdown = gr.Dropdown(
+        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5", "Character 6", "Character 7", "Character 8", "Character 9", "Character 10", "Character 11", "Character 12", "Character 13"],
+        label="Characters",
+        info="Select the character that you'd like to speak to",
+        value="Character 1"
+    )
+    api_key = gr.Textbox(type="password", label="OpenAI API Key")
+
+    msg.submit(user, [msg, chat_history, dropdown, api_key], [msg, chat_history, chat_display])
+    clear.click(lambda: ([], []), None, [chat_history, chat_display], queue=False)
+    dropdown.change(lambda x: ([], []), dropdown, [chat_history, chat_display])
+
+demo.queue()
+demo.launch(max_threads=20)
3t_dropdown copy 3.py
ADDED
@@ -0,0 +1,179 @@
+import gradio as gr
+from openai import OpenAI
+import time
+import html
+
+def predict(message, history, character, api_key, progress=gr.Progress()):
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in progress.tqdm(response, desc="Generating"):
+        if chunk.choices[0].delta.content:
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+            time.sleep(0.01)
+
+def format_history(history):
+    html_content = ""
+    for human, ai in history:
+        human_formatted = html.escape(human).replace('\n', '<br>')
+        html_content += f'<div class="message user-message"><strong>You:</strong> {human_formatted}</div>'
+        if ai:
+            ai_formatted = html.escape(ai).replace('\n', '<br>')
+            html_content += f'<div class="message ai-message"><strong>AI:</strong> {ai_formatted}</div>'
+    return html_content
+
+css = """
+#chat-display {
+    height: 600px;
+    overflow-y: auto;
+    border: 1px solid #ccc;
+    padding: 10px;
+    margin-bottom: 10px;
+}
+#chat-display::-webkit-scrollbar {
+    width: 10px;
+}
+#chat-display::-webkit-scrollbar-track {
+    background: #f1f1f1;
+}
+#chat-display::-webkit-scrollbar-thumb {
+    background: #888;
+}
+#chat-display::-webkit-scrollbar-thumb:hover {
+    background: #555;
+}
+.message {
+    margin-bottom: 10px;
+    word-wrap: break-word;
+    overflow-wrap: break-word;
+}
+.user-message, .ai-message {
+    padding: 5px;
+    border-radius: 5px;
+    max-height: 300px;
+    overflow-y: auto;
+}
+.user-message {
+    background-color: #e6f3ff;
+}
+.ai-message {
+    background-color: #f0f0f0;
+}
+.user-message::-webkit-scrollbar, .ai-message::-webkit-scrollbar {
+    width: 5px;
+}
+.user-message::-webkit-scrollbar-thumb, .ai-message::-webkit-scrollbar-thumb {
+    background: #888;
+}
+"""
+
+js = """
+let lastScrollTop = 0;
+let isNearBottom = true;
+
+function updateScroll() {
+    const chatDisplay = document.getElementById('chat-display');
+    if (!chatDisplay) return;
+
+    const currentScrollTop = chatDisplay.scrollTop;
+    const scrollHeight = chatDisplay.scrollHeight;
+    const clientHeight = chatDisplay.clientHeight;
+
+    // Check if user was near bottom before update
+    isNearBottom = (currentScrollTop + clientHeight >= scrollHeight - 50);
+
+    if (isNearBottom) {
+        chatDisplay.scrollTop = scrollHeight;
+    } else {
+        chatDisplay.scrollTop = lastScrollTop;
+    }
+
+    lastScrollTop = chatDisplay.scrollTop;
+}
+
+// Set up a MutationObserver to watch for changes in the chat display
+const observer = new MutationObserver(updateScroll);
+const config = { childList: true, subtree: true };
+
+// Start observing the chat display for configured mutations
+document.addEventListener('DOMContentLoaded', (event) => {
+    const chatDisplay = document.getElementById('chat-display');
+    if (chatDisplay) {
+        observer.observe(chatDisplay, config);
+
+        // Also update scroll on manual scroll
+        chatDisplay.addEventListener('scroll', function() {
+            lastScrollTop = chatDisplay.scrollTop;
+            isNearBottom = (chatDisplay.scrollTop + chatDisplay.clientHeight >= chatDisplay.scrollHeight - 50);
+        });
+    }
+
+    // Add event listener for Enter key
+    const textbox = document.querySelector('#component-13 input'); // Update this selector if needed
+    if (textbox) {
+        textbox.addEventListener('keydown', function(e) {
+            if (e.key === 'Enter' && !e.shiftKey) {
+                e.preventDefault();
+                document.querySelector('#component-13 button').click();
+            }
+        });
+    }
+});
+"""
+
+def user(user_message, history, character, api_key):
+    if user_message.strip() == "":
+        # user() is a generator, so yield (not return) the unchanged state
+        yield "", history, format_history(history)
+        return
+    history.append([user_message, None])
+    formatted_history = format_history(history)
+
+    # Start bot response generation
+    bot_message_generator = predict(user_message, history[:-1], character, api_key)
+    for chunk in bot_message_generator:
+        history[-1][1] = chunk
+        formatted_history = format_history(history)
+        yield "", history, formatted_history
+
+with gr.Blocks(css=css, js=js) as demo:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>My Chatbot</h1>")
+
+    chat_history = gr.State([])
+    chat_display = gr.HTML(elem_id="chat-display")
+    with gr.Row():
+        msg = gr.Textbox(
+            label="Your message",
+            lines=1,
+            placeholder="Type your message here... (Press Enter to send)",
+            elem_id="user-input"
+        )
+        send_btn = gr.Button("Send")
+        clear = gr.Button("Clear")
+
+    dropdown = gr.Dropdown(
+        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5", "Character 6", "Character 7", "Character 8", "Character 9", "Character 10", "Character 11", "Character 12", "Character 13"],
+        label="Characters",
+        info="Select the character that you'd like to speak to",
+        value="Character 1"
+    )
+    api_key = gr.Textbox(type="password", label="OpenAI API Key")
+
+    send_btn.click(user, [msg, chat_history, dropdown, api_key], [msg, chat_history, chat_display])
+    msg.submit(user, [msg, chat_history, dropdown, api_key], [msg, chat_history, chat_display])
+    clear.click(lambda: ([], []), None, [chat_history, chat_display], queue=False)
+    dropdown.change(lambda x: ([], []), dropdown, [chat_history, chat_display])
+
+demo.queue()
+demo.launch(max_threads=20)
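Note: the DOMContentLoaded handler above targets an auto-generated `#component-13` selector, which breaks whenever the layout changes (the inline comment in the file admits as much). The textbox already sets `elem_id="user-input"`, so a stable hook is available. A sketch, where `#send-btn` assumes also giving the Send button `elem_id="send-btn"` (not in the repo):

# Sketch: target stable elem_id hooks instead of auto-numbered components.
js_enter_hook = """
document.addEventListener('DOMContentLoaded', () => {
    const textbox = document.querySelector('#user-input textarea, #user-input input');
    if (textbox) {
        textbox.addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                // Assumes send_btn = gr.Button("Send", elem_id="send-btn")
                document.querySelector('#send-btn')?.click();
            }
        });
    }
});
"""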
3t_dropdown copy.py
ADDED
@@ -0,0 +1,34 @@
+import gradio as gr
+
+def converse(message, history, character):
+    print("User message:", message)
+    print("Chat history:", history)
+    print("Selected character:", character)
+
+    # Your chatbot logic here
+    response = f"You said: {message}. You're talking to {character}."
+
+    return response
+
+def reset(character):
+    return [], []
+
+# Gradio app
+with gr.Blocks() as demo:
+    gr.Markdown(f"<h1 style='text-align: center; margin-bottom: 1rem'>{'My Chatbot'}</h1>")
+    bot = gr.Chatbot(render=False)
+    dropdown = gr.Dropdown(
+        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5", "Character 6", "Character 7", "Character 8", "Character 9", "Character 10", "Character 11", "Character 12", "Character 13"],
+        label="Characters",
+        info="Select the character that you'd like to speak to",
+        value="Character 1"
+    )
+    chat = gr.ChatInterface(
+        fn=converse,
+        chatbot=bot,
+        additional_inputs=dropdown
+    )
+    dropdown.change(fn=reset, inputs=dropdown, outputs=[bot, chat.chatbot_state])
+
+demo.queue()
+demo.launch()
3t_dropdown.py
ADDED
@@ -0,0 +1,159 @@
+import gradio as gr
+from openai import OpenAI
+import time
+import html
+
+def predict(message, history, character, api_key, progress=gr.Progress()):
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in progress.tqdm(response, desc="Generating"):
+        if chunk.choices[0].delta.content:
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+            time.sleep(0.01)
+
+def format_history(history):
+    html_content = ""
+    for human, ai in history:
+        human_formatted = html.escape(human).replace('\n', '<br>')
+        html_content += f'<div class="message user-message"><strong>You:</strong> {human_formatted}</div>'
+        if ai:
+            ai_formatted = html.escape(ai).replace('\n', '<br>')
+            html_content += f'<div class="message ai-message"><strong>AI:</strong> {ai_formatted}</div>'
+    return html_content
+
+css = """
+#chat-display {
+    height: 600px;
+    overflow-y: auto;
+    border: 1px solid #ccc;
+    padding: 10px;
+    margin-bottom: 10px;
+}
+.message {
+    margin-bottom: 10px;
+    word-wrap: break-word;
+    overflow-wrap: break-word;
+}
+.user-message, .ai-message {
+    padding: 5px;
+    border-radius: 5px;
+    max-height: 300px;
+    overflow-y: auto;
+}
+.user-message {
+    background-color: #e6f3ff;
+}
+.ai-message {
+    background-color: #f0f0f0;
+}
+"""
+
+js = """
+let lastScrollHeight = 0;
+let lastScrollTop = 0;
+let isNearBottom = true;
+
+function updateScroll() {
+    const chatDisplay = document.getElementById('chat-display');
+    if (!chatDisplay) return;
+
+    const newScrollHeight = chatDisplay.scrollHeight;
+    const scrollDifference = newScrollHeight - lastScrollHeight;
+
+    if (isNearBottom) {
+        chatDisplay.scrollTop = newScrollHeight;
+    } else {
+        chatDisplay.scrollTop = lastScrollTop + scrollDifference;
+    }
+
+    lastScrollHeight = newScrollHeight;
+    lastScrollTop = chatDisplay.scrollTop;
+
+    isNearBottom = (chatDisplay.scrollTop + chatDisplay.clientHeight >= chatDisplay.scrollHeight - 50);
+}
+
+// Set up a MutationObserver to watch for changes in the chat display
+const observer = new MutationObserver(updateScroll);
+const config = { childList: true, subtree: true };
+
+// Start observing the chat display for configured mutations
+document.addEventListener('DOMContentLoaded', (event) => {
+    const chatDisplay = document.getElementById('chat-display');
+    if (chatDisplay) {
+        observer.observe(chatDisplay, config);
+
+        // Update scroll state on manual scroll
+        chatDisplay.addEventListener('scroll', function() {
+            lastScrollTop = chatDisplay.scrollTop;
+            isNearBottom = (chatDisplay.scrollTop + chatDisplay.clientHeight >= chatDisplay.scrollHeight - 50);
+        });
+    }
+});
+"""
+
+def user(user_message, history, character, api_key):
+    if user_message.strip() == "":
+        return "", history, format_history(history)
+    history.append([user_message, None])
+    formatted_history = format_history(history)
+    return "", history, formatted_history
+
+def bot(history, character, api_key):
+    if not history:
+        return history, format_history(history)
+    user_message = history[-1][0]
+    bot_message_generator = predict(user_message, history[:-1], character, api_key)
+    for chunk in bot_message_generator:
+        history[-1][1] = chunk
+        yield history, format_history(history)
+
+with gr.Blocks(css=css, js=js) as demo:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>My Chatbot</h1>")
+
+    chat_history = gr.State([])
+    chat_display = gr.HTML(elem_id="chat-display")
+    with gr.Row():
+        msg = gr.Textbox(
+            label="Your message",
+            lines=1,
+            placeholder="Type your message here... (Press Enter to send)"
+        )
+        send_btn = gr.Button("Send")
+        clear = gr.Button("Clear")
+
+    dropdown = gr.Dropdown(
+        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5", "Character 6", "Character 7", "Character 8", "Character 9", "Character 10", "Character 11", "Character 12", "Character 13"],
+        label="Characters",
+        info="Select the character that you'd like to speak to",
+        value="Character 1"
+    )
+    api_key = gr.Textbox(type="password", label="OpenAI API Key")
+
+    def send_message(user_message, history, character, api_key):
+        return user(user_message, history, character, api_key)
+
+    send_btn.click(send_message, [msg, chat_history, dropdown, api_key], [msg, chat_history, chat_display]).then(
+        bot, [chat_history, dropdown, api_key], [chat_history, chat_display]
+    )
+    msg.submit(send_message, [msg, chat_history, dropdown, api_key], [msg, chat_history, chat_display]).then(
+        bot, [chat_history, dropdown, api_key], [chat_history, chat_display]
+    )
+    clear.click(lambda: ([], []), None, [chat_history, chat_display], queue=False)
+    dropdown.change(lambda x: ([], []), dropdown, [chat_history, chat_display])
+
+demo.queue()
+demo.launch(max_threads=20)
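Note: 3t_dropdown.py splits input handling (`user`) from response generation (`bot`) and chains them with `.then()`, which is Gradio's standard pattern for echoing the user's turn immediately and then streaming the reply. A minimal standalone sketch of that pattern (component names here are illustrative, not from the repo):

import gradio as gr

def add_user_turn(user_message, history):
    # Append the user's turn right away; the bot's slot starts empty.
    history.append([user_message, None])
    return "", history

def stream_bot_turn(history):
    # Fill the last turn incrementally; each yield re-renders the Chatbot.
    for partial in ("Hel", "Hello", "Hello!"):
        history[-1][1] = partial
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.Textbox()
    box.submit(add_user_turn, [box, chatbot], [box, chatbot]).then(
        stream_bot_turn, chatbot, chatbot
    )

demo.launch()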
3t_dropdown_CHAT.py
ADDED
@@ -0,0 +1,152 @@
+import gradio as gr
+from openai import OpenAI
+import time
+import html
+
+def predict(message, history, character, api_key, progress=gr.Progress()):
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in progress.tqdm(response, desc="Generating"):
+        if chunk.choices[0].delta.content:
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+            time.sleep(0.01)
+
+def format_history(history):
+    html_content = ""
+    for human, ai in history:
+        human_formatted = html.escape(human).replace('\n', '<br>')
+        html_content += f'<div class="message user-message"><strong>You:</strong> {human_formatted}</div>'
+        if ai:
+            ai_formatted = html.escape(ai).replace('\n', '<br>')
+            html_content += f'<div class="message ai-message"><strong>AI:</strong> {ai_formatted}</div>'
+    return html_content
+
+css = """
+#chat-display {
+    height: 600px;
+    overflow-y: auto;
+    border: 1px solid #ccc;
+    padding: 10px;
+    margin-bottom: 10px;
+}
+#chat-display::-webkit-scrollbar {
+    width: 10px;
+}
+#chat-display::-webkit-scrollbar-track {
+    background: #f1f1f1;
+}
+#chat-display::-webkit-scrollbar-thumb {
+    background: #888;
+}
+#chat-display::-webkit-scrollbar-thumb:hover {
+    background: #555;
+}
+.message {
+    margin-bottom: 10px;
+    max-height: 300px;
+    overflow-y: auto;
+    word-wrap: break-word;
+}
+.user-message {
+    background-color: #e6f3ff;
+    padding: 5px;
+    border-radius: 5px;
+}
+.ai-message {
+    background-color: #f0f0f0;
+    padding: 5px;
+    border-radius: 5px;
+}
+"""
+
+js = """
+function maintainScroll(element_id) {
+    let element = document.getElementById(element_id);
+    let shouldScroll = element.scrollTop + element.clientHeight === element.scrollHeight;
+    let previousScrollTop = element.scrollTop;
+
+    return function() {
+        if (!shouldScroll) {
+            element.scrollTop = previousScrollTop;
+        } else {
+            element.scrollTop = element.scrollHeight;
+        }
+    }
+}
+
+let scrollMaintainer = maintainScroll('chat-display');
+setInterval(scrollMaintainer, 100);
+
+// Add event listener for Ctrl+Enter and prevent default Enter behavior
+document.addEventListener('DOMContentLoaded', (event) => {
+    const textbox = document.querySelector('#your_message textarea');
+    textbox.addEventListener('keydown', function(e) {
+        if (e.ctrlKey && e.key === 'Enter') {
+            e.preventDefault();
+            document.querySelector('#your_message button').click();
+        } else if (e.key === 'Enter' && !e.shiftKey) {
+            e.preventDefault();
+            const start = this.selectionStart;
+            const end = this.selectionEnd;
+            this.value = this.value.substring(0, start) + "\\n" + this.value.substring(end);
+            this.selectionStart = this.selectionEnd = start + 1;
+        }
+    });
+});
+"""
+
+with gr.Blocks(css=css, js=js) as demo:
+    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>My Chatbot</h1>")
+
+    chat_history = gr.State([])
+    chat_display = gr.HTML(elem_id="chat-display")
+    msg = gr.Textbox(
+        label="Your message",
+        lines=2,
+        max_lines=10,
+        placeholder="Type your message here... (Press Ctrl+Enter to send, Enter for new line)",
+        elem_id="your_message"
+    )
+    clear = gr.Button("Clear")
+
+    dropdown = gr.Dropdown(
+        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5", "Character 6", "Character 7", "Character 8", "Character 9", "Character 10", "Character 11", "Character 12", "Character 13"],
+        label="Characters",
+        info="Select the character that you'd like to speak to",
+        value="Character 1"
+    )
+    api_key = gr.Textbox(type="password", label="OpenAI API Key")
+
+    def user(user_message, history):
+        history.append([user_message, None])
+        return "", history, format_history(history)
+
+    def bot(history, character, api_key):
+        user_message = history[-1][0]
+        bot_message_generator = predict(user_message, history[:-1], character, api_key)
+        for chunk in bot_message_generator:
+            history[-1][1] = chunk
+            yield history, format_history(history)
+
+    msg.submit(user, [msg, chat_history], [msg, chat_history, chat_display]).then(
+        bot, [chat_history, dropdown, api_key], [chat_history, chat_display]
+    )
+    clear.click(lambda: ([], []), None, [chat_history, chat_display], queue=False)
+    dropdown.change(lambda x: ([], []), dropdown, [chat_history, chat_display])
+
+demo.queue()
+demo.launch(max_threads=20)
app copy 2.py
ADDED
@@ -0,0 +1,67 @@
+import gradio as gr
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+import numpy as np
+import torch
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+
+# Load a GPT-2 model for general question answering
+tokenizer = AutoTokenizer.from_pretrained("gpt2-medium", cache_dir="./cache")
+model = AutoModelForCausalLM.from_pretrained("gpt2-medium", cache_dir="./cache")
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+def answer(question):
+    input_ids = tokenizer.encode(f"Q: {question}\nA:", return_tensors="pt")
+
+    # Generate a response
+    with torch.no_grad():
+        output = model.generate(input_ids, max_length=150, num_return_sequences=1,
+                                temperature=0.7, top_k=50, top_p=0.95)
+
+    response = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    # Extract only the answer part
+    answer = response.split("A:")[-1].strip()
+    print(answer)
+    return response
+
+def process_audio(audio):
+    if audio is None:
+        return "No audio recorded.", ""
+    transcription = transcribe(audio)
+    answer_result = answer(transcription)
+    return transcription, answer_result
+
+def clear_all():
+    return None, "", ""
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Audio Transcription and Question Answering")
+
+    audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+    transcription_output = gr.Textbox(label="Transcription")
+    answer_output = gr.Textbox(label="Answer Result", lines=10)
+
+    clear_button = gr.Button("Clear")
+
+    audio_input.stop_recording(
+        fn=process_audio,
+        inputs=[audio_input],
+        outputs=[transcription_output, answer_output]
+    )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, answer_output]
+    )
+
+demo.launch()
app copy 3.py
ADDED
@@ -0,0 +1,26 @@
+
+from openai import OpenAI
+import gradio as gr
+
+api_key = "sk-proj-FuXWodUyfcvTYcc6K9ekT3BlbkFJ2eV3qu7UZOAWlW2eWRLQ"  # Replace with your key
+client = OpenAI(api_key=api_key)
+
+def predict(message, history):
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(model='gpt-3.5-turbo',
+                                              messages=history_openai_format,
+                                              temperature=1.0,
+                                              stream=True)
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content is not None:
+            partial_message = partial_message + chunk.choices[0].delta.content
+            yield partial_message
+
+gr.ChatInterface(predict).launch()
app copy 4.py
ADDED
@@ -0,0 +1,43 @@
+from openai import OpenAI
+import gradio as gr
+
+def predict(message, history, api_key):
+    client = OpenAI(api_key=api_key)
+
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-3.5-turbo',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content is not None:
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    return predict(message, history, api_key)
+
+with gr.Blocks() as demo:
+    api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+    message = gr.Textbox(label="Message")
+    state = gr.State([])
+    output = gr.Textbox(label="Output")
+
+    def update_output(api_key, message, state):
+        response = chat_with_api_key(api_key, message, state)
+        state.append((message, response))
+        return response, state
+
+    btn = gr.Button("Submit")
+    btn.click(update_output, inputs=[api_key, message, state], outputs=[output, state])
+
+demo.launch()
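Note: in app copy 4.py, `chat_with_api_key` returns a generator, so `update_output` as written stores the generator object in state and the Textbox would display its repr rather than text. A streaming rewrite under the same wiring might look like this (a sketch, not the repo's code):

def update_output(api_key, message, state):
    # Drain the generator so the Textbox receives text, not a generator
    # object; Gradio re-renders the output on every yield.
    partial = ""
    for partial in chat_with_api_key(api_key, message, state):
        yield partial, state
    state.append((message, partial))
    yield partial, state  # final yield propagates the updated history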
app copy 5.py
ADDED
@@ -0,0 +1,100 @@
+from openai import OpenAI
+import gradio as gr
+from transformers import pipeline
+import numpy as np
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
+
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    if 0:
+        for human, assistant in history:
+            history_openai_format.append({"role": "user", "content": human})
+            history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        yield accumulated_message, history
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+def answer(transcription):
+    context = "You are chatbot answering general questions"
+    print(transcription)
+    result = qa_model(question=transcription, context=context)
+    print(result)
+    return result['answer']
+
+
+
+def clear_all():
+    return None, "", ""
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+        message = gr.Textbox(label="Message")
+
+    gr.Markdown("# Audio Transcription and Question Answering")
+    with gr.Row():
+        audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+        with gr.Column():
+            transcription_output = gr.Textbox(label="Transcription")
+            clear_button = gr.Button("Clear")
+    state = gr.State([])
+    output = gr.Textbox(label="Output", lines=10)
+
+    def update_output(api_key, audio_input, state):
+        print('in update_output')
+        message = transcribe(audio_input)
+        responses = chat_with_api_key(api_key, message, state)
+        accumulated_response = ""
+        for response, updated_state in responses:
+            accumulated_response = response
+            yield accumulated_response, updated_state
+
+    btn = gr.Button("Submit")
+    btn.click(update_output, inputs=[api_key, message, state], outputs=[output, state])
+
+    audio_input.stop_recording(
+        fn=update_output,
+        inputs=[api_key, audio_input, state],
+        outputs=[output, state]
+    )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, output]
+    )
+
+demo.launch()
app copy 6_blocked_scroll.py
ADDED
@@ -0,0 +1,102 @@
+import gradio as gr
+from transformers import pipeline
+import numpy as np
+from openai import OpenAI
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
+
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        # yield accumulated_message, history
+        yield message, [[message, accumulated_message]]
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+def answer(transcription):
+    context = "You are a chatbot answering general questions"
+    result = qa_model(question=transcription, context=context)
+    return result['answer']
+
+def process_audio(audio):
+    if audio is None:
+        return "No audio recorded.", []
+    transcription = transcribe(audio)
+    answer_result = answer(transcription)
+    return transcription, [[transcription, answer_result]]
+
+def update_output(api_key, audio_input, state):
+    print('in update_output')
+    message = transcribe(audio_input)
+    responses = chat_with_api_key(api_key, message, state)
+    accumulated_response = ""
+    for response, updated_state in responses:
+        accumulated_response = response
+        yield accumulated_response, updated_state
+
+def clear_all():
+    return None, "", []
+
+with gr.Blocks() as demo:
+    answer_output = gr.Chatbot(label="Answer Result")
+    with gr.Row():
+        audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+        with gr.Column():
+            api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+            transcription_output = gr.Textbox(label="Transcription")
+            clear_button = gr.Button("Clear")
+    state = gr.State([])
+    if 1:
+        audio_input.stop_recording(
+            fn=update_output,
+            inputs=[api_key, audio_input, state],
+            outputs=[transcription_output, answer_output]
+        )
+    if 0:
+        audio_input.stop_recording(
+            fn=process_audio,
+            inputs=[audio_input],
+            outputs=[transcription_output, answer_output]
+        )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, answer_output]
+    )
+
+demo.launch()
app copy 6_record_chatbot.py
ADDED
@@ -0,0 +1,53 @@
+import gradio as gr
+from transformers import pipeline
+import numpy as np
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+def answer(transcription):
+    context = "You are a chatbot answering general questions"
+    result = qa_model(question=transcription, context=context)
+    return result['answer']
+
+def process_audio(audio):
+    if audio is None:
+        return "No audio recorded.", []
+    transcription = transcribe(audio)
+    answer_result = answer(transcription)
+    return transcription, [[transcription, answer_result]]
+
+def clear_all():
+    return None, "", []
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Audio Transcription and Question Answering")
+
+    audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+    transcription_output = gr.Textbox(label="Transcription")
+    answer_output = gr.Chatbot(label="Answer Result")
+
+    clear_button = gr.Button("Clear")
+
+    audio_input.stop_recording(
+        fn=process_audio,
+        inputs=[audio_input],
+        outputs=[transcription_output, answer_output]
+    )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, answer_output]
+    )
+
+demo.launch()
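One edge case worth noting in `transcribe`, which several files here share: `np.max(np.abs(y))` is zero for a silent recording, so the in-place division produces NaNs. A guarded variant (a sketch; `normalize_audio` is an illustrative name, not in the repo):

import numpy as np

def normalize_audio(y):
    # Scale samples to [-1, 1]; leave all-zero (silent) input
    # untouched to avoid 0/0 NaNs.
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    return y / peak if peak > 0 else y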
app copy 6_working.py
ADDED
@@ -0,0 +1,102 @@
+import gradio as gr
+from transformers import pipeline
+import numpy as np
+from openai import OpenAI
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
+
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        # yield accumulated_message, history
+        yield message, [[message, accumulated_message]]
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+def answer(transcription):
+    context = "You are a chatbot answering general questions"
+    result = qa_model(question=transcription, context=context)
+    return result['answer']
+
+def process_audio(audio):
+    if audio is None:
+        return "No audio recorded.", []
+    transcription = transcribe(audio)
+    answer_result = answer(transcription)
+    return transcription, [[transcription, answer_result]]
+
+def update_output(api_key, audio_input, state):
+    print('in update_output')
+    message = transcribe(audio_input)
+    responses = chat_with_api_key(api_key, message, state)
+    accumulated_response = ""
+    for response, updated_state in responses:
+        accumulated_response = response
+        yield accumulated_response, updated_state
+
+def clear_all():
+    return None, "", []
+
+with gr.Blocks() as demo:
+    answer_output = gr.Chatbot(label="Answer Result")
+    with gr.Row():
+        audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+        with gr.Column():
+            api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+            transcription_output = gr.Textbox(label="Transcription")
+            clear_button = gr.Button("Clear")
+    state = gr.State([])
+    if 1:
+        audio_input.stop_recording(
+            fn=update_output,
+            inputs=[api_key, audio_input, state],
+            outputs=[transcription_output, answer_output]
+        )
+    if 0:
+        audio_input.stop_recording(
+            fn=process_audio,
+            inputs=[audio_input],
+            outputs=[transcription_output, answer_output]
+        )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, answer_output]
+    )
+
+demo.launch()
app copy _6_working.py
ADDED
@@ -0,0 +1,101 @@
+from openai import OpenAI
+import gradio as gr
+from transformers import pipeline
+import numpy as np
+
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
+
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    if 0:
+        for human, assistant in history:
+            history_openai_format.append({"role": "user", "content": human})
+            history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        yield accumulated_message, history
+
+def transcribe(audio):
+    if audio is None:
+        return "No audio recorded."
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
+
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
+
+
+
+
+def clear_all():
+    return None, "", ""
+
+with gr.Blocks() as demo:
+    #output = gr.Textbox(label="Output", elem_id="output")
+    chatbot = gr.Chatbot(label="Chat Output")
+    with gr.Row():
+        api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+        message = gr.Textbox(label="Message")
+
+    gr.Markdown("# Audio Transcription and Question Answering")
+    with gr.Row():
+        audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+        with gr.Column():
+            transcription_output = gr.Textbox(label="Transcription")
+            clear_button = gr.Button("Clear")
+    state = gr.State([])
+
+
+    def update_output(api_key, audio_input, state):
+        print('in update_output')
+        message = transcribe(audio_input)
+        responses = chat_with_api_key(api_key, message, state)
+        accumulated_response = ""
+        for response, updated_state in responses:
+            accumulated_response = response
+            yield accumulated_response, updated_state
+
+    btn = gr.Button("Submit")
+    btn.click(update_output, inputs=[api_key, message, state], outputs=[chatbot, state])
+
+    audio_input.stop_recording(
+        fn=update_output,
+        inputs=[api_key, audio_input, state],
+        outputs=[chatbot, state]
+    )
+
+    clear_button.click(
+        fn=clear_all,
+        inputs=[],
+        outputs=[audio_input, transcription_output, chatbot]
+    )
+    gr.HTML("""
+    <style>
+    #output {
+        overflow-y: auto;
+        max-height: 800px;
+    }</style>""")
+demo.launch()
app.py
CHANGED
@@ -1,13 +1,42 @@
 import gradio as gr
-from transformers import pipeline
+from transformers import pipeline
 import numpy as np
-import
+from openai import OpenAI
 
 transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
+qa_model = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
 
-
-
-
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        # yield accumulated_message, history
+        yield message, [[message, accumulated_message]]
 
 def transcribe(audio):
     if audio is None:
@@ -18,45 +47,51 @@ def transcribe(audio):
 
     return transcriber({"sampling_rate": sr, "raw": y})["text"]
 
-def answer(
-
-
-
-    with torch.no_grad():
-        output = model.generate(input_ids, max_length=150, num_return_sequences=1,
-                                temperature=0.7, top_k=50, top_p=0.95)
-
-    response = tokenizer.decode(output[0], skip_special_tokens=True)
-
-    # Extract only the answer part
-    answer = response.split("A:")[-1].strip()
-    print(answer)
-    return response
+def answer(transcription):
+    context = "You are a chatbot answering general questions"
+    result = qa_model(question=transcription, context=context)
+    return result['answer']
 
 def process_audio(audio):
     if audio is None:
-        return "No audio recorded.",
+        return "No audio recorded.", []
     transcription = transcribe(audio)
    answer_result = answer(transcription)
-    return transcription, answer_result
+    return transcription, [[transcription, answer_result]]
+
+def update_output(api_key, audio_input, state):
+    print('in update_output')
+    message = transcribe(audio_input)
+    responses = chat_with_api_key(api_key, message, state)
+    accumulated_response = ""
+    for response, updated_state in responses:
+        accumulated_response = response
+        yield accumulated_response, updated_state
 
 def clear_all():
-    return None, "",
+    return None, "", []
 
 with gr.Blocks() as demo:
-    gr.
-
-
-
-
-
-
-
-
-
-
-
+    answer_output = gr.Chatbot(label="Answer Result")
+    with gr.Row():
+        audio_input = gr.Audio(label="Audio Input", sources=["microphone"], type="numpy")
+        with gr.Column():
+            api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+            transcription_output = gr.Textbox(label="Transcription")
+            clear_button = gr.Button("Clear")
+    state = gr.State([])
+    if 1:
+        audio_input.stop_recording(
+            fn=update_output,
+            inputs=[api_key, audio_input, state],
+            outputs=[transcription_output, answer_output]
+        )
+    if 0:
+        audio_input.stop_recording(
+            fn=process_audio,
+            inputs=[audio_input],
+            outputs=[transcription_output, answer_output]
+        )
 
     clear_button.click(
        fn=clear_all,
@@ -64,4 +99,5 @@ with gr.Blocks() as demo:
         outputs=[audio_input, transcription_output, answer_output]
     )
 
-
+
+demo.launch()
app_Chat_working.py
ADDED
@@ -0,0 +1,53 @@
+from openai import OpenAI
+import gradio as gr
+
+def predict(message, history, api_key):
+    print('in predict')
+    client = OpenAI(api_key=api_key)
+    history_openai_format = []
+    for human, assistant in history:
+        history_openai_format.append({"role": "user", "content": human})
+        history_openai_format.append({"role": "assistant", "content": assistant})
+    history_openai_format.append({"role": "user", "content": message})
+
+    response = client.chat.completions.create(
+        model='gpt-4o',
+        messages=history_openai_format,
+        temperature=1.0,
+        stream=True
+    )
+
+    partial_message = ""
+    for chunk in response:
+        if chunk.choices[0].delta.content:
+            print(111, chunk.choices[0].delta.content)
+            partial_message += chunk.choices[0].delta.content
+            yield partial_message
+
+def chat_with_api_key(api_key, message, history):
+    print('in chat_with_api_key')
+    accumulated_message = ""
+    for partial_message in predict(message, history, api_key):
+        accumulated_message = partial_message
+        history.append((message, accumulated_message))
+        yield accumulated_message, history
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        api_key = gr.Textbox(label="API Key", placeholder="Enter your API key", type="password")
+        message = gr.Textbox(label="Message")
+    state = gr.State([])
+    output = gr.Textbox(label="Output", lines=10)
+
+    def update_output(api_key, message, state):
+        print('in update_output')
+        responses = chat_with_api_key(api_key, message, state)
+        accumulated_response = ""
+        for response, updated_state in responses:
+            accumulated_response = response
+            yield accumulated_response, updated_state
+
+    btn = gr.Button("Submit")
+    btn.click(update_output, inputs=[api_key, message, state], outputs=[output, state])
+
+demo.launch()
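Every OpenAI-backed file in this commit repeats the same pairs-to-messages conversion inside `predict`. A shared helper would remove the duplication (a sketch; `to_openai_messages` is an illustrative name, not in the repo):

def to_openai_messages(history, message):
    # Convert Gradio's [user, assistant] pairs into the OpenAI
    # chat-completions message format, then add the new user turn.
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    return messages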