Error when using this model with @xenova/transformers


The following browser-based implementation, which uses Vite as the build tool, results in an error.

Implementation:

import { pipeline, env } from "@xenova/transformers";

async function runTranscription() {
  env.useBrowserCache = false;
  env.allowLocalModels = false; // Skip local model checks since we're loading from the Hugging Face Hub

  // Enable the WASM backend proxy to prevent UI freezing
  env.backends.onnx.wasm.proxy = true;

  try {
    // Load the pipeline for automatic speech recognition (ASR)
    const pipe = await pipeline(
      "automatic-speech-recognition",
      "onnx-community/whisper-large-v3-turbo"
    );

    const transcript = await pipe(
      "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav"
    );

    console.log(transcript); // Output the transcription result
  } catch (error) {
    console.error("Transcription error:", error);
  }
}

runTranscription();

Error encountered:

Error: Unsupported model type: whisper
at AutoModelForCTC.from_pretrained (@xenova_transformers.js?v=ab730207:24870:13)
at async @xenova_transformers.js?v=ab730207:29521:21
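
For reference, the onnx-community/whisper-large-v3-turbo model card targets Transformers.js v3 (the @huggingface/transformers package) rather than @xenova/transformers v2, so a package/version mismatch may be involved. Below is a minimal sketch of the same call against the v3 package, assuming it is installed in place of @xenova/transformers; this is an assumption, not a confirmed fix.

import { pipeline, env } from "@huggingface/transformers";

async function runTranscription() {
  // Skip local model checks and fetch from the Hugging Face Hub, as in the snippet above
  env.allowLocalModels = false;

  // Load the ASR pipeline using the v3 package (assumed installed via `npm i @huggingface/transformers`)
  const pipe = await pipeline(
    "automatic-speech-recognition",
    "onnx-community/whisper-large-v3-turbo"
  );

  const transcript = await pipe(
    "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav"
  );

  console.log(transcript);
}

runTranscription();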
