Update app.py

app.py CHANGED

@@ -240,30 +240,36 @@
     input.value = '';
 
     updateMessagesDisplay();
+
+    // Add empty assistant message for streaming
+    const assistantMessage = {
+        role: 'assistant',
+        content: '',
+        timestamp: new Date().toISOString()
+    };
+    messages.push(assistantMessage);
+
     setLoading(true);
+    updateMessagesDisplay(); // Show the empty assistant message with loading
 
     try {
         const aiResponse = await getAIResponse(message);
-
-
-
-
-
-
+
+        // Update the last message with final response
+        messages[messages.length - 1].content = aiResponse;
+
+        // Remove cursor and show final response
+        updateMessagesDisplay();
 
         // Track activity after receiving response
         trackActivity();
     } catch (error) {
-
-
-
-            timestamp: new Date().toISOString()
-        };
-        messages.push(errorMessage);
+        // Replace the empty message with error
+        messages[messages.length - 1].content = 'Sorry, I encountered an error. Please try again.';
+        updateMessagesDisplay();
     }
 
     setLoading(false);
-    updateMessagesDisplay();
 }
 
 async function getAIResponse(userMessage) {
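
The hunk above switches the send flow to an optimistic-placeholder pattern: an empty assistant message is pushed before the request, filled in when the response arrives, and overwritten with an error string if the request fails. A minimal standalone sketch of that flow, using hypothetical names (askWithPlaceholder, sendToModel, render) in place of the app's own functions:

async function askWithPlaceholder(messages, userText, sendToModel, render) {
    // Push the empty placeholder that the loading dots will attach to.
    const placeholder = { role: 'assistant', content: '', timestamp: new Date().toISOString() };
    messages.push(placeholder);
    render();

    try {
        // Fill the same object once the full response is known.
        placeholder.content = await sendToModel(userText);
    } catch (err) {
        // On failure, the placeholder becomes the error bubble.
        placeholder.content = 'Sorry, I encountered an error. Please try again.';
    }
    render();
}
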
@@ -293,7 +299,7 @@
     const data = {
         model: selectedModel,
         messages: apiMessages,
-        stream:
+        stream: true, // Enable streaming
         max_tokens: 2000,
         temperature: 0.7
     };
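
For context, a request body like data would typically be posted with fetch along these lines; the endpoint URL and authorization header below are placeholders, since the actual call site is not part of this hunk:

const response = await fetch('https://example.com/v1/chat/completions', {  // placeholder URL
    method: 'POST',
    headers: {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer <token>'  // placeholder credential
    },
    body: JSON.stringify(data)
});
// With stream: true the body arrives as Server-Sent Events, so the code in the
// next hunk reads response.body incrementally instead of calling response.json().
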
@@ -309,11 +315,61 @@
         throw new Error("API Error: " + response.status + " - " + errorText);
     }
 
-    const
-    const
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let fullResponse = "";
+
+    try {
+        while (true) {
+            const { done, value } = await reader.read();
+            if (done) break;
+
+            const chunk = decoder.decode(value);
+            const lines = chunk.split('\n');
+
+            for (const line of lines) {
+                if (line.startsWith('data: ')) {
+                    const data = line.slice(6);
+                    if (data === '[DONE]') {
+                        const modelName = models[selectedModel] || "AI";
+                        return fullResponse + "\n\n---\n*Response created by: **" + modelName + "***";
+                    }
+
+                    try {
+                        const parsed = JSON.parse(data);
+                        const delta = parsed.choices?.[0]?.delta?.content || '';
+                        if (delta) {
+                            fullResponse += delta;
+                            // Update the display in real-time
+                            updateStreamingResponse(fullResponse);
+                        }
+                    } catch (e) {
+                        // Skip invalid JSON
+                        continue;
+                    }
+                }
+            }
+        }
+    } finally {
+        reader.releaseLock();
+    }
+
     const modelName = models[selectedModel] || "AI";
+    return fullResponse + "\n\n---\n*Response created by: **" + modelName + "***";
+}
+
+function updateStreamingResponse(partialResponse) {
+    // Find the last message area and update it with streaming text
+    const messagesArea = document.getElementById('messages-area');
+    const messageElements = messagesArea.children;
 
-
+    if (messageElements.length > 0) {
+        const lastMessage = messageElements[messageElements.length - 1];
+        const contentDiv = lastMessage.querySelector('.flex-1 .whitespace-pre-wrap');
+        if (contentDiv) {
+            contentDiv.textContent = partialResponse + " ▌"; // Add cursor
+        }
+    }
 }
 
 function setLoading(loading) {
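
To make the parsing loop concrete, here is a self-contained example of the kind of chunk it consumes. The delta payload shape ({"choices":[{"delta":{"content": ...}}]}) is assumed from the OpenAI-style chat-completions streaming format the code targets, not shown in this diff:

const sampleChunk =
    'data: {"choices":[{"delta":{"content":"Hel"}}]}\n\n' +
    'data: {"choices":[{"delta":{"content":"lo"}}]}\n\n' +
    'data: [DONE]\n\n';

let text = '';
for (const line of sampleChunk.split('\n')) {
    if (!line.startsWith('data: ')) continue;   // blank lines and non-data lines are skipped
    const payload = line.slice(6);
    if (payload === '[DONE]') break;            // end-of-stream sentinel
    text += JSON.parse(payload).choices?.[0]?.delta?.content || '';
}
console.log(text); // "Hello"
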
@@ -355,7 +411,10 @@
         let content = message.content;
         let attribution = '';
 
-
+        // Handle empty assistant messages (streaming in progress)
+        if (message.role === 'assistant' && content === '' && isLoading) {
+            content = '';
+        } else if (message.role === 'assistant' && content.includes('---\n*Response created by:')) {
             const parts = content.split('\n\n---\n*Response created by:');
             content = parts[0];
             if (parts[1]) {
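
For reference, the existing split in the else branch behaves like this on a finished message (the model name below is illustrative):

const finished = "Hello!\n\n---\n*Response created by: **gpt-x***";
const parts = finished.split('\n\n---\n*Response created by:');
console.log(parts[0]); // "Hello!"        -> shown as the message body
console.log(parts[1]); // " **gpt-x***"   -> shown as the attribution line
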
@@ -377,21 +436,16 @@
         `;
     });
 
-    if
-
-
-
-
-
-        <div class="
-
-            <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-1"></div>
-            <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-2"></div>
-            <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-3"></div>
-        </div>
-    </div>
+    // Show loading dots if streaming
+    if (isLoading && messages.length > 0 && messages[messages.length - 1].role === 'assistant' && messages[messages.length - 1].content === '') {
+        // Replace the last empty message with loading dots
+        html = html.replace(/<div class="whitespace-pre-wrap text-gray-100"><\/div>/, `
+            <div class="flex items-center gap-2 text-gray-400">
+                <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-1"></div>
+                <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-2"></div>
+                <div class="w-2 h-2 bg-gray-400 rounded-full animate-bounce loading-dot-3"></div>
             </div>
-
+        `);
     }
 
     messagesArea.innerHTML = html;
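
The replacement above assumes the per-message template renders empty content as <div class="whitespace-pre-wrap text-gray-100"></div>; that exact markup is implied by the regex rather than shown in this hunk. A reduced illustration:

let html = '<div class="whitespace-pre-wrap text-gray-100"></div>';   // empty assistant bubble
html = html.replace(/<div class="whitespace-pre-wrap text-gray-100"><\/div>/,
    '<div class="flex items-center gap-2 text-gray-400">(animated dots)</div>');
console.log(html);
// String.prototype.replace without the /g flag swaps only the first match,
// which during streaming is the single empty placeholder message.
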