File size: 1,284 Bytes
7956c78 2fc66df 7956c78 2fc66df 7956c78 fd28154 7956c78 fd28154 7956c78 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
import type { ModelEntryWithTokenizer } from '$lib/types';
import type { ModelEntry } from '@huggingface/hub';
import type { PageServerLoad } from './$types';
import { env } from '$env/dynamic/private';
/**
 * Server-side load: fetches the list of warm, conversational text-generation
 * models from the Hugging Face Hub, then fetches each model's
 * `tokenizer_config.json` in parallel so the client can render chat templates.
 *
 * @returns `{ models }` — Hub model entries augmented with their tokenizer config,
 *          sorted case-insensitively by repo id.
 * @throws Error if the model-list request or any tokenizer-config request fails.
 */
export const load: PageServerLoad = async ({ fetch }) => {
	const apiUrl =
		'https://huggingface.co/api/models?pipeline_tag=text-generation&inference=Warm&filter=conversational';
	const HF_TOKEN = env.HF_TOKEN;

	const res = await fetch(apiUrl, {
		headers: {
			Authorization: `Bearer ${HF_TOKEN}`
		}
	});
	// Fail fast with a clear message instead of letting res.json() blow up on an error body.
	if (!res.ok) {
		throw new Error(`Failed to fetch compatible models: ${res.status} ${res.statusText}`);
	}
	const compatibleModels: ModelEntry[] = await res.json();
	// Stable, case-insensitive ordering by repo id for display.
	compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));

	// Fetch every tokenizer config concurrently; any single failure rejects the whole load.
	const promises = compatibleModels.map(async (model) => {
		// NOTE: ModelEntry exposes the repo id as `id` — the previous `model.modelId`
		// was undefined and produced URLs like https://huggingface.co/undefined/raw/...
		const configUrl = `https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`;
		const res = await fetch(configUrl, {
			headers: {
				Authorization: `Bearer ${HF_TOKEN}`
			}
		});
		if (!res.ok) {
			throw new Error(
				`Failed to fetch tokenizer configuration for model ${model.id}: ${res.status} ${res.statusText}`
			);
		}
		const tokenizerConfig = await res.json();
		// `satisfies` validates the shape while keeping the inferred (narrower) type.
		return { ...model, tokenizerConfig } satisfies ModelEntryWithTokenizer;
	});
	const models: ModelEntryWithTokenizer[] = await Promise.all(promises);

	return { models };
};
|