Update script1.js

script1.js (+398, -70, CHANGED)
@@ -2,9 +2,11 @@
 const USER_SPEECH_INTERRUPT_DELAY = 500;
 const TEXT_TO_SPEECH_API_ENDPOINT = "https://api.streamelements.com/kappa/v2/speech";
 const CHUNK_SIZE = 300;
-const MAX_PREFETCH_REQUESTS =
+const MAX_PREFETCH_REQUESTS = 5; // Reduced to avoid overloading
 const PREFETCH_CACHE_EXPIRATION = 60000; // 1 minute
 const AUDIO_CACHE_EXPIRATION = 3600000; // 1 hour
+const WEBCAM_INTERVAL = 5000;
+const MAX_HISTORY_LENGTH = 6; // Limit history for better performance

 // DOM Elements
 const startStopButton = document.getElementById('startStopButton');
@@ -15,7 +17,6 @@ const responseTimeDisplay = document.getElementById('responseTime');
 const userActivityIndicator = document.getElementById('userIndicator');
 const aiActivityIndicator = document.getElementById('aiIndicator');
 const transcriptDiv = document.getElementById('transcript');
-const webcamToggleButton = document.getElementById('webcamToggle');

 // Speech Recognition
 let speechRecognizer;
@@ -45,21 +46,9 @@ let conversationHistory = [];
 // Audio Caching
 const audioCache = new Map();

-// Webcam
-let isWebcamActive = false;
-let app;
-let lastCaption = "";
-
-const clients = [
-  "multimodalart/Florence-2-l4",
-  "gokaygokay/Florence-2",
-  "multimodalart/Florence-2-l4-2",
-  "gokaygokay/Florence-2",
-];
-
 // Utility Functions

-// Normalize query text
+// Normalize query text
 const normalizeQueryText = query => query.trim().toLowerCase().replace(/[^\w\s]/g, '');

 // Generate a cache key
@@ -69,6 +58,7 @@ const generateCacheKey = (normalizedQuery, voice, history, modelName) =>
 // Update activity indicators
 const updateActivityIndicators = (state = null) => {
   userActivityIndicator.textContent = isUserSpeaking ? "User: Speaking" : "User: Idle";
+
   if (isRequestInProgress && !currentAudio) {
     aiActivityIndicator.textContent = "AI: Processing...";
   } else if (currentAudio && !isUserSpeaking) {
@@ -92,16 +82,16 @@ const updateLatency = () => {

 // Add to conversation history
 const addToConversationHistory = (role, content) => {
-  conversationHistory
-  conversationHistory.pop();
+  conversationHistory.push({ role, content });
+  if (conversationHistory.length > MAX_HISTORY_LENGTH) {
+    conversationHistory.splice(0, conversationHistory.length - MAX_HISTORY_LENGTH);
   }
-
-  if (conversationHistory.length > 6) conversationHistory.splice(0, 2);
-};
+};
+
+// Check if audio playback should be interrupted
+const shouldInterruptAudioPlayback = (interimTranscript) =>
+  Date.now() - lastUserSpeechTimestamp > USER_SPEECH_INTERRUPT_DELAY || interimTranscript.length > 5;
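To make the trimming concrete, here is a self-contained sketch of the same logic; none of these names are in the file:

// Self-contained sketch of the trimming above; these names are not in script1.js.
const history = [];
const add = (role, content) => {
  history.push({ role, content });
  if (history.length > 6) history.splice(0, history.length - 6);
};
for (let i = 1; i <= 8; i++) add('user', `turn ${i}`);
console.log(history.map(m => m.content)); // ["turn 3", "turn 4", ..., "turn 8"]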

 // Audio Management Functions

@@ -130,6 +120,29 @@ const playNextAudio = async () => {
   }
 };

+// Interrupt audio playback
+const interruptAudioPlayback = (reason = 'unknown') => {
+  console.log(`Interrupting audio (reason: ${reason})...`);
+  if (currentAudio) {
+    currentAudio.pause();
+    currentAudio.currentTime = 0;
+    currentAudio = null;
+  }
+
+  audioPlaybackQueue.length = 0;
+  isRequestInProgress = false;
+
+  if (requestAbortController) {
+    requestAbortController.abort();
+    requestAbortController = null;
+  }
+
+  prefetchCache.clear();
+  prefetchQueue.length = 0;
+  updateActivityIndicators();
+};
+
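The reset above leans on AbortController semantics; a standalone sketch of the same pattern (the URL is a placeholder):

// Standalone sketch of the abort pattern; example.com is a placeholder URL.
let controller = new AbortController();
fetch('https://example.com/stream', { signal: controller.signal })
  .catch(err => { if (err.name === 'AbortError') console.log('fetch aborted'); });
controller.abort(); // rejects the in-flight fetch with AbortError
controller = null;  // drop the reference, as interruptAudioPlayback does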
 // Prefetching and Caching Functions

 // Prefetch and cache the first TTS audio chunk
@@ -143,84 +156,294 @@ const prefetchFirstAudioChunk = (query, voice) => {
   processPrefetchQueue();
 };

-const captureAndProcessImage = async () => {
-  if (!isWebcamActive) return;
-
-  const video = document.getElementById('webcam');
-  canvas.width = video.videoWidth;
-  canvas.height = video.videoHeight;
-  const context = canvas.getContext('2d');
-  context.drawImage(video, 0, 0, canvas.width, canvas.height);
-};
-
+// Process the prefetch queue
+const processPrefetchQueue = async () => {
+  while (prefetchQueue.length > 0 && pendingPrefetchRequests.size < MAX_PREFETCH_REQUESTS) {
+    const { query, voice, cacheKey } = prefetchQueue.shift();
+    const abortController = new AbortController();
+    pendingPrefetchRequests.set(cacheKey, abortController);
+
+    try {
+      const firstAudioUrl = await streamAndPrefetchAudio(query, voice, abortController.signal);
+      if (firstAudioUrl) prefetchCache.set(cacheKey, { url: firstAudioUrl, timestamp: Date.now() });
+    } catch (error) {
+      if (error.name !== 'AbortError') console.error("Error prefetching audio:", error);
+    } finally {
+      pendingPrefetchRequests.delete(cacheKey);
+      processPrefetchQueue();
+    }
+  }
+};
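pendingPrefetchRequests caps concurrent prefetches at MAX_PREFETCH_REQUESTS. A stripped-down sketch of this bounded-queue discipline, with a stand-in job instead of a TTS request:

// Stripped-down sketch of the bounded queue above; doWork is a stand-in job,
// and none of these names exist in script1.js.
const queue = ['a', 'b', 'c', 'd', 'e', 'f', 'g'];
const pending = new Set();
const MAX = 5;
const doWork = task => new Promise(resolve => setTimeout(() => resolve(task), 100));

function pump() {
  while (queue.length > 0 && pending.size < MAX) {
    const task = queue.shift();
    pending.add(task);
    doWork(task).finally(() => { pending.delete(task); pump(); });
  }
}
pump(); // at most MAX jobs in flight; each completion admits the next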
+
+// Cancel pending prefetch requests
+const cancelPrefetchRequests = (query) => {
+  const normalizedQuery = normalizeQueryText(query);
+
+  for (const [cacheKey, abortController] of pendingPrefetchRequests) {
+    if (cacheKey.startsWith(normalizedQuery)) {
+      abortController.abort();
+      pendingPrefetchRequests.delete(cacheKey);
+    }
+  }
+};
+
+// AI Interaction Functions
+
+// Send a query to the AI
+async function sendQueryToAI(query) {
+  console.log("Sending query to AI:", query);
+  isRequestInProgress = true;
+  updateActivityIndicators();
+  firstResponseTextTimestamp = null;
+  queryStartTime = Date.now();
+
+  const cacheKey = generateCacheKey(query, voiceSelectionDropdown.value, conversationHistory, modelSelectionDropdown.value);
+
   try {
+    let combinedQuery = `{USER: "${query}"}`;
+    if (isCaptioningEnabled && lastCaption) {
+      combinedQuery += `, ${lastCaption}`;
+    }
+    combinedQuery += `, {USER: "${query}"}`;
+
+    await streamAndHandleAudioResponse(combinedQuery, voiceSelectionDropdown.value, requestAbortController.signal);
+
   } catch (error) {
+    if (error.name !== 'AbortError') {
+      console.error("Error sending query to AI:", error);
+    }
+  } finally {
+    isRequestInProgress = false;
+    updateActivityIndicators();
   }
 };

+// Process the final speech transcript
+const processSpeechTranscript = (transcript) => {
+  const trimmedTranscript = transcript.trimStart();
+  if (trimmedTranscript !== '' && !isRequestInProgress) {
+    activeQuery = trimmedTranscript;
+    addToConversationHistory('user', activeQuery); // Add history before sending
+    sendQueryToAI(activeQuery);
   }
 };
233 |
|
|
|
|
|
234 |
|
235 |
+
// Network and Streaming Functions
|
|
|
|
|
|
|
|
|
|
|
236 |
|
237 |
+
// Stream AI response and handle audio
|
238 |
+
const streamAndHandleAudioResponse = async (query, voice, abortSignal) => {
|
239 |
+
const response = await fetchAIResponse(query, abortSignal);
|
240 |
+
|
241 |
+
if (!response.ok) {
|
242 |
+
if (response.status === 429) {
|
243 |
+
console.log("Rate limit hit, retrying in 1 second...");
|
244 |
+
await new Promise(resolve => setTimeout(resolve, 1000));
|
245 |
+
await sendQueryToAI(query);
|
246 |
+
return;
|
247 |
+
}
|
248 |
+
throw new Error(`Network response was not ok: ${response.status}`);
|
249 |
+
}
|
250 |
+
|
251 |
+
console.log("Streaming audio response received");
|
252 |
+
await handleStreamingResponse(response.body, voice, abortSignal);
|
253 |
};
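The single one-second retry above re-enters sendQueryToAI and can loop under sustained rate limiting; a capped exponential backoff is one alternative. This sketch is not what the commit does:

// Hypothetical capped-backoff variant (not in the commit): give up after
// three retries instead of recursing indefinitely.
async function fetchWithBackoff(query, signal, attempt = 0) {
  const response = await fetchAIResponse(query, signal);
  if (response.status === 429 && attempt < 3) {
    await new Promise(resolve => setTimeout(resolve, 1000 * 2 ** attempt)); // 1s, 2s, 4s
    return fetchWithBackoff(query, signal, attempt + 1);
  }
  return response;
}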

+// Stream AI response for prefetching
+const streamAndPrefetchAudio = async (query, voice, abortSignal) => {
+  const response = await fetchAIResponse(query, abortSignal);
+
+  if (!response.ok) throw new Error('Network response was not ok');
+
+  return handleStreamingResponseForPrefetch(response.body, voice, abortSignal);
+};
+
+// Fetch AI response
+const fetchAIResponse = async (query, abortSignal) => {
+  const userSambanovaKey = document.getElementById('apiKey').value.trim() !== '' ? document.getElementById('apiKey').value.trim() : 'none';
+
+  const url = '/stream_text';
+  const requestBody = {
+    query: query,
+    history: JSON.stringify(conversationHistory),
+    model: modelSelectionDropdown.value,
+    api_key: userSambanovaKey
+  };
+
+  return fetch(url, {
+    method: 'POST',
+    headers: {
+      'Accept': 'text/event-stream',
+      'Content-Type': 'application/json'
+    },
+    body: JSON.stringify(requestBody),
+    signal: abortSignal
+  });
+};
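A minimal usage sketch of fetchAIResponse with cancellation; aborting the controller rejects the pending fetch with an AbortError:

// Usage sketch: wiring an AbortController into fetchAIResponse.
const controller = new AbortController();
fetchAIResponse("hello", controller.signal)
  .then(response => console.log("status:", response.status))
  .catch(err => { if (err.name === 'AbortError') console.log("cancelled"); });
controller.abort();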

+// Handle the streaming response for prefetching
+const handleStreamingResponseForPrefetch = async (responseStream, voice, abortSignal) => {
+  const reader = responseStream.getReader();
+  const decoder = new TextDecoder("utf-8");
+  let buffer = "";
+
   try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      if (abortSignal.aborted) throw new DOMException('Request aborted', 'AbortError');
+
+      const chunk = decoder.decode(value, { stream: true });
+      buffer += chunk;
+      const lines = buffer.split('\n');
+
+      for (let i = 0; i < lines.length - 1; i++) {
+        const line = lines[i];
+        if (line.startsWith('data: ')) {
+          const textContent = line.substring(6).trim();
+          if (textContent) {
+            return await generateTextToSpeechAudio(textContent, voice);
+          }
+        }
+      }

+      buffer = lines[lines.length - 1];
+    }
+  } catch (error) {
+    console.error("Error in handleStreamingResponseForPrefetch:", error);
+  } finally {
+    reader.releaseLock();
+  }

+  return null;
+};
+
+// Handle the streaming audio response
+const handleStreamingResponse = async (responseStream, voice, abortSignal) => {
+  const reader = responseStream.getReader();
+  const decoder = new TextDecoder("utf-8");
+  let buffer = "";
+  let fullResponseText = "";
+  let fullResponseText2 = "";
+  let textChunk = "";
+
+  try {
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      if (abortSignal.aborted) throw new DOMException('Request aborted', 'AbortError');
+
+      if (isUserSpeaking) {
+        interruptAudioPlayback('user is speaking');
+        break;
+      }
+
+      const chunk = decoder.decode(value, { stream: true });
+      buffer += chunk;
+      const lines = buffer.split('\n');
+
+      for (const line of lines) { // Simplified loop
+        if (line.startsWith('data: ')) {
+          const textContent = line.substring(6).trim();
+          if (textContent) {
+            if (!firstResponseTextTimestamp) firstResponseTextTimestamp = Date.now();
+            fullResponseText += textContent + " "; // Accumulate full response
+            fullResponseText2 += textContent + " ";
+            textChunk += textContent + " ";
+            transcriptDiv.textContent = fullResponseText2;
+
+            if (textChunk.length >= CHUNK_SIZE) {
+              const audioUrl = await generateTextToSpeechAudio(textChunk, voice);
+              if (audioUrl) {
+                audioPlaybackQueue.push({ url: audioUrl });
+                if (!currentAudio) playNextAudio();
+              }
+              textChunk = ""; // Clear after sending
+            }
+          }
+        }
+      }
+    }
   } catch (error) {
+    console.error("Error in handleStreamingResponse:", error);
+  } finally {
+    // ... (Send any remaining textChunk)
+    if (textChunk !== "") {
+      const audioUrl = await generateTextToSpeechAudio(textChunk, voice);
+      if (audioUrl) {
+        audioPlaybackQueue.push({ url: audioUrl });
+        if (!currentAudio) playNextAudio();
+      }
+    }
+
+    addToConversationHistory('assistant', fullResponseText2);
+    fullResponseText = "";
+    fullResponseText2 = "";
+
+    reader.releaseLock();
+
   }
 };
392 |
|
393 |
+
// Generate Text-to-Speech audio with caching
|
394 |
+
const generateTextToSpeechAudio = async (text, voice) => {
|
395 |
+
const normalizedText = normalizeQueryText(text);
|
396 |
+
const cacheKey = `${normalizedText}-${voice}`;
|
397 |
+
|
398 |
+
if (audioCache.has(cacheKey)) {
|
399 |
+
const cachedData = audioCache.get(cacheKey);
|
400 |
+
if (Date.now() - cachedData.timestamp < AUDIO_CACHE_EXPIRATION) {
|
401 |
+
return cachedData.url;
|
402 |
+
} else {
|
403 |
+
audioCache.delete(cacheKey);
|
404 |
+
}
|
405 |
}
|
|
|
406 |
|
407 |
+
try {
|
408 |
+
const response = await fetch(`${TEXT_TO_SPEECH_API_ENDPOINT}?voice=${voice}&text=${encodeURIComponent(text)}`, { method: 'GET' });
|
409 |
+
if (!response.ok) throw new Error('Network response was not ok');
|
410 |
+
const audioBlob = await response.blob();
|
411 |
+
const audioUrl = URL.createObjectURL(audioBlob);
|
412 |
+
|
413 |
+
audioCache.set(cacheKey, { url: audioUrl, timestamp: Date.now() });
|
414 |
+
return audioUrl;
|
415 |
+
} catch (error) {
|
416 |
+
console.error("Error generating TTS audio:", error);
|
417 |
+
return null;
|
418 |
}
|
419 |
+
};
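Expired entries are removed from the Map, but their object URLs are never revoked, so the blobs behind them stay reachable. A hypothetical eviction helper that also frees the blob:

// Hypothetical eviction helper (not in the commit): revoke the object URL so
// the blob behind it can be garbage-collected.
const evictAudio = (cacheKey) => {
  const entry = audioCache.get(cacheKey);
  if (entry) {
    URL.revokeObjectURL(entry.url);
    audioCache.delete(cacheKey);
  }
};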
+

 // Speech Recognition Initialization
+
 if ('webkitSpeechRecognition' in window) {
   speechRecognizer = new webkitSpeechRecognition();
-  speechRecognizer
+  Object.assign(speechRecognizer, {
+    continuous: true,
+    interimResults: true,
+    language: 'en-US',
+    maxAlternatives: 3
+  });
+
+  speechRecognizer.onstart = () => {
+    console.log("Speech recognition started");
+    isUserSpeaking = true;
+    lastUserSpeechTimestamp = Date.now();
+    updateActivityIndicators();
+    startStopButton.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M9 9h6v6h-6z"></path><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"></path><path d="M19 10v2a7 7 0 0 1-14 0v-2"></path><line x1="12" y1="19" x2="12" y2="23"></line><line x1="8" y1="23" x2="16" y2="23"></line></svg> Stop Listening';
+  };

   speechRecognizer.onresult = (event) => {
     let interimTranscript = '';
     for (let i = event.resultIndex; i < event.results.length; i++) {
       const transcript = event.results[i][0].transcript;
       if (event.results[i].isFinal) {
+        interruptAudioPlayback('final');
         processSpeechTranscript(transcript);
         isUserSpeaking = false;
         updateActivityIndicators();

@@ -228,14 +451,119 @@ if ('webkitSpeechRecognition' in window) {
       } else {
         interimTranscript += transcript;
         isUserSpeaking = true;
+        lastUserSpeechTimestamp = Date.now();
         updateActivityIndicators();
+
+        if (interimTranscript.length > prefetchTextQuery.length + 5) {
+          cancelPrefetchRequests(prefetchTextQuery);
+        }
+        prefetchTextQuery = interimTranscript;
+        prefetchFirstAudioChunk(interimTranscript, voiceSelectionDropdown.value);
+
+        if (isRequestInProgress && shouldInterruptAudioPlayback(interimTranscript)) {
+          interruptAudioPlayback('interim');
+        }
       }
     }
   };
+
+  speechRecognizer.onerror = (event) => {
+    console.error('Speech recognition error:', event.error);
+    if (isSpeechRecognitionActive) speechRecognizer.start();
+  };
+
+  speechRecognizer.onend = () => {
+    isUserSpeaking = false;
+    updateActivityIndicators();
+
+    if (isSpeechRecognitionActive) speechRecognizer.start();
+  };
+
+  startStopButton.addEventListener('click', () => {
+    if (isSpeechRecognitionActive) {
+      speechRecognizer.stop();
+      isSpeechRecognitionActive = false;
+      startStopButton.innerHTML = '<svg id="microphoneIcon" xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"></path><path d="M19 10v2a7 7 0 0 1-14 0v-2"></path><line x1="12" y1="19" x2="12" y2="23"></line><line x1="8" y1="23" x2="16" y2="23"></line></svg> Start Listening';
+
+      // Stop webcam capture when speech recognition stops
+      clearInterval(webcamInterval);
+      video.srcObject = null;
+      lastCaption = "";
+      isCaptioningEnabled = false;
+
+    } else {
+      speechRecognizer.start();
+      isSpeechRecognitionActive = true;
+      startStopButton.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M9 9h6v6h-6z"></path><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"></path><path d="M19 10v2a7 7 0 0 1-14 0v-2"></path><line x1="12" y1="19" x2="12" y2="23"></line><line x1="8" y1="23" x2="16" y2="23"></line></svg> Stop Listening';
+
+      // Start webcam capture when speech recognition starts
+      isCaptioningEnabled = true;
+      startWebcam();
+    }
+  });
+} else {
+  alert('Your browser does not support the Web Speech API.');
 }
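The feature check covers only the webkit prefix. A broader probe that also tries the unprefixed constructor would look like this (a sketch, not part of the commit):

// Broader feature probe; the commit checks only the webkit prefix.
const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
if (SpeechRecognitionImpl) {
  const recognizer = new SpeechRecognitionImpl();
  recognizer.continuous = true;
  recognizer.interimResults = true;
}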

 setInterval(updateLatency, 100);

+
+// Webcam Integration
+import { client, handle_file } from 'https://cdn.jsdelivr.net/npm/@gradio/client/+esm';
+
+const video = document.getElementById('webcam');
+let app;
+let lastCaption = "";
+
+const clients = [
+  "multimodalart/Florence-2-l4",
+  "gokaygokay/Florence-2",
+  "multimodalart/Florence-2-l4-2",
+  "gokaygokay/Florence-2",
+];
+
+let webcamInterval; // Store the interval ID
+
+async function startWebcam() {
+  try {
+    const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+    video.srcObject = stream;
+    webcamInterval = setInterval(captureAndProcessImage, WEBCAM_INTERVAL); // Set interval only once
+  } catch (error) {
+    console.error("Error accessing webcam: ", error);
+  }
+}
+
+async function captureAndProcessImage() {
+  const canvas = document.createElement('canvas');
+  canvas.width = video.videoWidth;
+  canvas.height = video.videoHeight;
+  const context = canvas.getContext('2d');
+  context.drawImage(video, 0, 0, canvas.width, canvas.height);
+
+  const blob = await new Promise(resolve => canvas.toBlob(resolve, 'image/png'));
+  await processWithGradio(blob);
+}
+
+async function processWithGradio(imageBlob) {
+  try {
+    const randomClient = clients[Math.floor(Math.random() * clients.length)];
+    app = await client(randomClient);
+    const handledFile = await handle_file(imageBlob);
+
+    const result = await app.predict("/process_image", [handledFile, "Detailed Caption"]);
+
+    const dataString = result.data[0];
+    lastCaption = dataString || lastCaption;
+  } catch (error) {
+    console.error("Error processing with Gradio:", error);
+  }
+}
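A hypothetical smoke test of the captioning path, run from the console while the webcam is streaming into #webcam:

// Capture one frame, caption it via a random Florence-2 Space, print the result.
captureAndProcessImage().then(() => console.log("caption:", lastCaption));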
+
 window.onload = () => {
+  // Start webcam only if speech recognition is active
+  if (isCaptioningEnabled) {
+    startWebcam();
+  }
+};