Spaces:
Running
Running
File size: 3,552 Bytes
318ad79 632e088 318ad79 a0d2969 318ad79 2362cf7 318ad79 2362cf7 318ad79 2362cf7 318ad79 2362cf7 318ad79 2362cf7 318ad79 2362cf7 318ad79 2362cf7 a0d2969 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Ask Questions to the Model</title>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
</head>
<body>
<h1>Ask Questions to the Model</h1>
<a href="entrenament-pdf.html" style="margin:5px;padding: 5px; border:1px solid green">Entrenament PDF</a>
<a href="preguntar-pdf.html" style="margin:5px;padding: 5px; border:1px solid green">Preguntar PDF</a>
<br><br>
<input type="text" id="question" placeholder="Type your question here">
<button id="askQuestion">Ask</button>
<pre id="response"></pre>
<script>
/**
 * Restores the trained model persisted in the browser's IndexedDB store.
 * On failure (e.g. no model has been trained/saved yet) it shows a
 * user-facing hint in the #response element, logs the error, and rethrows
 * so the caller can abort its flow.
 * @returns {Promise<tf.LayersModel>} the model saved under 'pdf-trained-model'
 * @throws the original load error when no saved model exists
 */
async function loadModel() {
    console.log('Checking available models in IndexedDB...');
    let model;
    try {
        model = await tf.loadLayersModel('indexeddb://pdf-trained-model');
    } catch (err) {
        console.error('Error loading model:', err);
        document.getElementById('response').textContent = 'Model not found. Train it first!';
        throw err;
    }
    console.log('Model loaded successfully from IndexedDB.');
    return model;
}
/**
 * Converts a question string into an array of token ids using a
 * word -> id lookup table. Unknown words map to 0 (out-of-vocabulary id).
 *
 * Fixes two defects of the original version:
 *  - splitting a blank/whitespace-only question produced empty-string
 *    tokens ('' -> 0), so the caller's `input.length === 0` guard never
 *    fired; empty fragments are now filtered out.
 *  - `tokenizer[token]` walked the prototype chain, so words such as
 *    "toString" resolved to inherited functions instead of 0; lookups
 *    now require the key to be an own property.
 *
 * @param {string} question - raw user input
 * @param {Object<string, number>} tokenizer - word -> id vocabulary
 * @returns {number[]} one id per whitespace-separated word (may be empty)
 */
function tokenizeQuestion(question, tokenizer) {
    const tokens = question.split(/\s+/).filter((token) => token.length > 0);
    console.log('Tokens from question:', tokens);
    return tokens.map((token) => (Object.hasOwn(tokenizer, token) ? tokenizer[token] : 0));
}
// Click handler: tokenizes the typed question, pads/truncates it to the
// model's fixed input length, runs a prediction, and renders the raw output.
// Fixes vs. the original: (1) tensors are now disposed (tf.js never
// garbage-collects tensor memory, so each click leaked the input, padded,
// and prediction tensors); (2) inputs longer than MAX_LEN are truncated —
// tf.pad only adds columns, so a long question previously produced a tensor
// wider than the shape the model expects.
document.getElementById('askQuestion').addEventListener('click', async () => {
    const MAX_LEN = 10; // fixed sequence length the model was trained with — TODO confirm against training page
    const question = document.getElementById('question').value;
    const responseElement = document.getElementById('response');
    if (!question) {
        responseElement.textContent = 'Please enter a question.';
        return;
    }
    responseElement.textContent = 'Loading model...';
    try {
        const model = await loadModel();
        // Tokenizer setup (replace with actual tokenizer logic from training)
        const tokenizer = { "example": 1, "question": 2 }; // Placeholder for actual tokenizer
        // Truncate to MAX_LEN so the padded tensor always matches the model's input shape.
        const input = tokenizeQuestion(question, tokenizer).slice(0, MAX_LEN);
        if (input.length === 0) {
            responseElement.textContent = 'Error: Question could not be tokenized.';
            return;
        }
        // Build a [1, input.length] tensor and right-pad it with zeros to [1, MAX_LEN].
        const rawInput = tf.tensor2d([input], [1, input.length]);
        const paddedInput = tf.pad(
            rawInput,
            [[0, 0], [0, MAX_LEN - input.length]],
            'constant'
        );
        rawInput.dispose(); // the un-padded tensor is no longer needed
        console.log('Padded input for prediction:', paddedInput.arraySync());
        try {
            const prediction = model.predict(paddedInput);
            const predictionArray = await prediction.array();
            tf.dispose(prediction); // free tensor memory once values are extracted
            console.log('Prediction result:', predictionArray);
            responseElement.textContent = `Model response: ${JSON.stringify(predictionArray)}`;
        } catch (err) {
            console.error('Prediction error:', err);
            responseElement.textContent = 'Error during prediction.';
        } finally {
            paddedInput.dispose(); // always release the input tensor, even on failure
        }
    } catch (err) {
        responseElement.textContent = 'Error: Could not load model or process question.';
        console.error('Error in processing question:', err);
    }
});
</script>
</body>
</html>
|