diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..76add878f8dd778c3381fb3da45c8140db7db510
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+node_modules
+dist
\ No newline at end of file
diff --git a/.npmrc b/.npmrc
new file mode 100644
index 0000000000000000000000000000000000000000..f263a6c7f9503dfb8e4b74cc3b5186ac324785bb
--- /dev/null
+++ b/.npmrc
@@ -0,0 +1,2 @@
+shared-workspace-lockfile = false
+include-workspace-root = true
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f6ac7ef9fce621d89bd2bd4452e8bed49ca524ce
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+# syntax=docker/dockerfile:1
+# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+FROM node:20
+
+WORKDIR /app
+
+RUN corepack enable
+
+COPY --link --chown=1000 . .
+
+RUN pnpm install
+CMD ["pnpm", "--filter", "widgets", "dev"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 7e958246abedc8a92988ee1f6e942329e8ffbcc2..e2200bcda0d0c52ad7d4f54eaa2e32fd2f868f3f 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@ colorFrom: pink
 colorTo: red
 sdk: docker
 pinned: false
+app_port: 5173
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Demo app for [Inference Widgets](https://github.com/huggingface/huggingface.js/tree/main/packages/widgets).
\ No newline at end of file
diff --git a/package.json b/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..c45d934995f1cbfc2c765e4d65c09256e54c0775
--- /dev/null
+++ b/package.json
@@ -0,0 +1,30 @@
+{
+	"license": "MIT",
+	"packageManager": "pnpm@8.10.5",
+	"dependencies": {
+		"@typescript-eslint/eslint-plugin": "^5.51.0",
+		"@typescript-eslint/parser": "^5.51.0",
+		"eslint": "^8.35.0",
+		"eslint-config-prettier": "^9.0.0",
+		"eslint-plugin-prettier": "^4.2.1",
+		"eslint-plugin-svelte": "^2.30.0",
+		"prettier": "^3.0.0",
+		"prettier-plugin-svelte": "^3.0.0",
+		"typescript": "^5.0.0",
+		"vite": "4.1.4"
+	},
+	"scripts": {
+		"lint": "eslint --quiet --fix --ext .cjs,.ts .eslintrc.cjs",
+		"lint:check": "eslint --ext .cjs,.ts .eslintrc.cjs",
+		"format": "prettier --write package.json .prettierrc .vscode .eslintrc.cjs e2e .github *.md",
+		"format:check": "prettier --check package.json .prettierrc .vscode .eslintrc.cjs .github *.md"
+	},
+	"devDependencies": {
+		"@vitest/browser": "^0.29.7",
+		"semver": "^7.5.0",
+		"ts-node": "^10.9.1",
+		"tsup": "^6.7.0",
+		"vitest": "^0.29.4",
+		"webdriverio": "^8.6.7"
+	}
+}
diff --git a/packages/tasks/.prettierignore b/packages/tasks/.prettierignore
new file mode 100644
index 0000000000000000000000000000000000000000..cac0c694965d419e7145c6ae3f371c733d5dba15
--- /dev/null
+++ b/packages/tasks/.prettierignore
@@ -0,0 +1,4 @@
+pnpm-lock.yaml
+# Ignored so that code samples don't end up with tabs; tabs don't display well on npm
+README.md
+dist
\ No newline at end of file
diff --git a/packages/tasks/README.md b/packages/tasks/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..67285ef19d3f8a36cc57f8cd8f9022b5a7308c50
--- /dev/null
+++ b/packages/tasks/README.md
@@ -0,0 +1,20 @@
+# Tasks
+
+This package contains data used for https://huggingface.co/tasks.
+
+## Philosophy behind Tasks
+
+The Task pages are made to lower the barrier of entry to understanding a task that can be solved with machine learning, and to using or training a model to accomplish it. It's a collaborative documentation effort made to help out software developers, social scientists, or anyone with no background in machine learning who is interested in understanding how machine learning models can be used to solve a problem.
+
+The task pages avoid jargon so that everyone can understand the documentation, and if specific terminology is needed, it is explained at the most basic level possible. This is important to understand before contributing to Tasks: by the end of every task page, the reader should be able to find and pull a model from the Hub, use it on their own data, and see whether it works for their use case, as a proof of concept.
+
+## How to Contribute
+You can open a pull request to contribute documentation for a new task. Under `src` we have a folder for every task that contains two files, `about.md` and `data.ts`. `about.md` contains the markdown part of the page: use cases, resources, and a minimal code block showing how to infer with a model for the task. `data.ts` contains links to canonical models and datasets, metrics, the schema of the task, and the information the inference widget needs.
+
+![Anatomy of a Task Page](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/anatomy.png)
+
+We have a [`dataset`](https://huggingface.co/datasets/huggingfacejs/tasks) that contains data used in the inference widget. The last file is `const.ts`, which holds the task-to-library mapping (e.g. token-classification to spacy) where you can add a library. The libraries will appear in the top right corner of the task page, like below.
+
+![Libraries of a Task](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/libraries.png)
+
+This might seem overwhelming, but you don't necessarily need to add all of these in one pull request or on your own; you can simply contribute one section. Feel free to ask for help whenever you need it.
\ No newline at end of file
diff --git a/packages/tasks/package.json b/packages/tasks/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..2ee60dac623d6e28d6c86855cf2ec673e652ca51
--- /dev/null
+++ b/packages/tasks/package.json
@@ -0,0 +1,46 @@
+{
+	"name": "@huggingface/tasks",
+	"packageManager": "pnpm@8.10.5",
+	"version": "0.0.5",
+	"description": "List of ML tasks for huggingface.co/tasks",
+	"repository": "https://github.com/huggingface/huggingface.js.git",
+	"publishConfig": {
+		"access": "public"
+	},
+	"main": "./dist/index.js",
+	"module": "./dist/index.mjs",
+	"types": "./dist/index.d.ts",
+	"exports": {
+		".": {
+			"types": "./dist/index.d.ts",
+			"require": "./dist/index.js",
+			"import": "./dist/index.mjs"
+		}
+	},
+	"source": "src/index.ts",
+	"scripts": {
+		"lint": "eslint --quiet --fix --ext .cjs,.ts .",
+		"lint:check": "eslint --ext .cjs,.ts .",
+		"format": "prettier --write .",
+		"format:check": "prettier --check .",
+		"prepublishOnly": "pnpm run build",
+		"build": "tsup src/index.ts --format cjs,esm --clean --dts",
+		"prepare": "pnpm run build",
+		"check": "tsc"
+	},
+	"files": [
+		"dist",
+		"src",
+		"tsconfig.json"
+	],
+	"keywords": [
+		"huggingface",
+		"hub",
+		"languages"
+	],
+	"author": "Hugging Face",
+	"license": "MIT",
+	"devDependencies": {
+		"typescript": "^5.0.4"
+	}
+}
diff --git a/packages/tasks/pnpm-lock.yaml b/packages/tasks/pnpm-lock.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3ed38c891dea128b57a69af6d76aa1473decd4c
--- /dev/null
+++ b/packages/tasks/pnpm-lock.yaml
@@ -0,0 +1,14 @@
+lockfileVersion: '6.0'
+
+devDependencies:
+  typescript:
+    specifier: ^5.0.4
+    version: 5.0.4
+
+packages:
+
+  /typescript@5.0.4:
+    resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==}
+    engines: {node: '>=12.20'}
+    hasBin: true
+    dev: true
diff --git a/packages/tasks/src/Types.ts b/packages/tasks/src/Types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0824893f11271a7fe7873a2a0ddc803c8cdc1017
--- /dev/null
+++ b/packages/tasks/src/Types.ts
@@ -0,0 +1,64 @@
+import type { ModelLibraryKey } from "./modelLibraries";
+import type { PipelineType } from "./pipelines";
+
+export interface ExampleRepo {
+	description: string;
+	id: string;
+}
+
+export type TaskDemoEntry =
+	| {
+			filename: string;
+			type: "audio";
+	  }
+	| {
+			data: Array<{
+				label: string;
+				score: number;
+			}>;
+			type: "chart";
+	  }
+	| {
+			filename: string;
+			type: "img";
+	  }
+	| {
+			table: string[][];
+			type: "tabular";
+	  }
+	| {
+			content: string;
+			label: string;
+			type: "text";
+	  }
+	| {
+			text: string;
+			tokens: Array<{
+				end: number;
+				start: number;
+				type: string;
+			}>;
+			type: "text-with-tokens";
+	  };
+
+export interface TaskDemo {
+	inputs: TaskDemoEntry[];
+	outputs: TaskDemoEntry[];
+}
+
+export interface TaskData {
+	datasets: ExampleRepo[];
+	demo: TaskDemo;
+	id: PipelineType;
+	isPlaceholder?: boolean;
+	label: string;
+	libraries: ModelLibraryKey[];
+	metrics: ExampleRepo[];
+	models: ExampleRepo[];
+	spaces: ExampleRepo[];
+	summary: string;
+	widgetModels: string[];
+	youtubeId?: string;
+}
+
+export type TaskDataCustom = Omit<TaskData, "id" | "label" | "libraries">;
diff --git a/packages/tasks/src/audio-classification/about.md b/packages/tasks/src/audio-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b1d7c6e9d8900375db0ba0d638cad0bb171676d
--- /dev/null
+++ b/packages/tasks/src/audio-classification/about.md
@@ -0,0 +1,85 @@
+## Use Cases
+
+### Command Recognition
+
+Command recognition or keyword spotting classifies utterances into a predefined set of commands. This is often done on-device for fast response time.
+
+As an example, a model trained on the Google Speech Commands dataset can classify which of the following commands the user is saying:
+
+```
+'yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go', 'unknown', 'silence'
+```
+
+Speechbrain models can easily perform this task with just a couple of lines of code!
+
+```python
+from speechbrain.pretrained import EncoderClassifier
+model = EncoderClassifier.from_hparams(
+  "speechbrain/google_speech_command_xvector"
+)
+model.classify_file("file.wav")
+```
+
+### Language Identification
+
+Datasets such as VoxLingua107 allow anyone to train language identification models for up to 107 languages! This can be extremely useful as a preprocessing step for other systems. Here's an example [model](https://huggingface.co/TalTechNLP/voxlingua107-epaca-tdnn) trained on VoxLingua107.
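+
+A minimal sketch of using it, following the model card (`audio.wav` is a placeholder file):
+
+```python
+from speechbrain.pretrained import EncoderClassifier
+
+language_id = EncoderClassifier.from_hparams(
+  "TalTechNLP/voxlingua107-epaca-tdnn"
+)
+language_id.classify_file("audio.wav")
+```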
+
+### Emotion recognition
+
+Emotion recognition is self-explanatory: the model classifies the emotion conveyed in a piece of audio. In addition to trying the widgets, you can use the Inference API to perform audio classification. Here is a simple example that uses a [HuBERT](https://huggingface.co/superb/hubert-large-superb-er) model fine-tuned for this task.
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/superb/hubert-large-superb-er"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+# [{'label': 'neu', 'score': 0.60},
+# {'label': 'hap', 'score': 0.20},
+# {'label': 'ang', 'score': 0.13},
+# {'label': 'sad', 'score': 0.07}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio classification models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.audioClassification({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "facebook/mms-lid-126",
+});
+```
+
+### Speaker Identification
+
+Speaker Identification classifies audio according to which person is speaking; the set of speakers is usually predefined. You can try out this task with [this model](https://huggingface.co/superb/wav2vec2-base-superb-sid). A useful dataset for this task is VoxCeleb1.
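+
+Since speaker identification models are exposed as audio classification models on the Hub, a minimal sketch with the 🤗 Transformers pipeline looks like this (`speech.wav` is a placeholder file):
+
+```python
+from transformers import pipeline
+
+# The labels of this checkpoint are speaker identities from VoxCeleb1.
+classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-sid")
+classifier("speech.wav", top_k=5)
+```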
+
+## Solving audio classification for your own data
+
+We have some great news! You can do fine-tuning (transfer learning) to train a well-performing model without requiring as much data. Pretrained models such as Wav2Vec2 and HuBERT exist. [Facebook's Wav2Vec2 XLS-R model](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) is a large multilingual model trained on 436K hours of speech across 128 languages.
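+
+As a rough sketch of what this looks like with 🤗 Transformers, you can load a pretrained checkpoint with a fresh classification head and then fine-tune it with the `Trainer` API (the label set below is hypothetical):
+
+```python
+from transformers import AutoFeatureExtractor, AutoModelForAudioClassification
+
+labels = ["up", "down", "left", "right"]  # hypothetical label set
+feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
+model = AutoModelForAudioClassification.from_pretrained(
+    "facebook/wav2vec2-base",
+    num_labels=len(labels),
+    label2id={label: i for i, label in enumerate(labels)},
+    id2label={i: label for i, label in enumerate(labels)},
+)
+# The model is now ready to be fine-tuned on your labeled audio,
+# for example with the Trainer API as shown in the notebook below.
+```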
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+### Notebooks
+
+- [PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification)
+
+### Documentation
+
+- [Audio classification task guide](https://huggingface.co/docs/transformers/tasks/audio_classification)
diff --git a/packages/tasks/src/audio-classification/data.ts b/packages/tasks/src/audio-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..92e879c5cbe5e83011dd665b803433901ebe2096
--- /dev/null
+++ b/packages/tasks/src/audio-classification/data.ts
@@ -0,0 +1,77 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A benchmark of 10 different audio tasks.",
+			id: "superb",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "audio.wav",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				data: [
+					{
+						label: "Up",
+						score: 0.2,
+					},
+					{
+						label: "Down",
+						score: 0.8,
+					},
+				],
+				type: "chart",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description: "",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "An easy-to-use model for Command Recognition.",
+			id: "speechbrain/google_speech_command_xvector",
+		},
+		{
+			description: "An Emotion Recognition model.",
+			id: "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition",
+		},
+		{
+			description: "A language identification model.",
+			id: "facebook/mms-lid-126",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can predict the language spoken in a given audio.",
+			id: "akhaliq/Speechbrain-audio-classification",
+		},
+	],
+	summary:
+		"Audio classification is the task of assigning a label or class to a given audio. It can be used for recognizing which command a user is giving or the emotion of a statement, as well as identifying a speaker.",
+	widgetModels: ["facebook/mms-lid-126"],
+	youtubeId: "KWwzcmG98Ds",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/audio-to-audio/about.md b/packages/tasks/src/audio-to-audio/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..e56275277d211906c0bea7891f7bdb5fa0aeae7f
--- /dev/null
+++ b/packages/tasks/src/audio-to-audio/about.md
@@ -0,0 +1,56 @@
+## Use Cases
+
+### Speech Enhancement (Noise removal)
+
+Speech Enhancement is largely self-explanatory: it improves (or enhances) the quality of an audio recording by removing noise. There are multiple libraries that solve this task, such as Speechbrain, Asteroid and ESPNet. Here is a simple example using Speechbrain:
+
+```python
+from speechbrain.pretrained import SpectralMaskEnhancement
+model = SpectralMaskEnhancement.from_hparams(
+  "speechbrain/mtl-mimic-voicebank"
+)
+model.enhance_file("file.wav")
+```
+
+Alternatively, you can use the [Inference API](https://huggingface.co/inference-api) to solve this task
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/speechbrain/mtl-mimic-voicebank"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio-to-audio models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.audioToAudio({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "speechbrain/sepformer-wham",
+});
+```
+
+### Audio Source Separation
+
+Audio Source Separation allows you to isolate the sound coming from each individual source. For example, if you have an audio file with multiple people speaking, you can get an audio file for each of them. You can then use an Automatic Speech Recognition system to extract the text from each of these sources as an initial step for your system!
+
+Audio-to-Audio can also be used to remove noise from audio files: you get one audio for the person speaking and another audio for the noise. This can also be useful when you have multi-person audio with some noise: you can get one audio for each person and then one audio for the noise. A minimal sketch is shown below.
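+
+The following sketch of source separation with SpeechBrain follows the `speechbrain/sepformer-wham` model card (`mixture.wav` is a placeholder; WHAM! models operate at 8 kHz):
+
+```python
+from speechbrain.pretrained import SepformerSeparation
+import torchaudio
+
+model = SepformerSeparation.from_hparams("speechbrain/sepformer-wham")
+# est_sources contains one separated waveform per source
+est_sources = model.separate_file(path="mixture.wav")
+torchaudio.save("source1.wav", est_sources[:, :, 0].detach().cpu(), 8000)
+torchaudio.save("source2.wav", est_sources[:, :, 1].detach().cpu(), 8000)
+```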
+
+## Training a model for your own data
+
+If you want to learn how to train models for the Audio-to-Audio task, we recommend the following tutorials:
+
+- [Speech Enhancement](https://speechbrain.github.io/tutorial_enhancement.html)
+- [Source Separation](https://speechbrain.github.io/tutorial_separation.html)
diff --git a/packages/tasks/src/audio-to-audio/data.ts b/packages/tasks/src/audio-to-audio/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..56f03188e3e9bbe93d3ebd72b54b06fe1756f8cb
--- /dev/null
+++ b/packages/tasks/src/audio-to-audio/data.ts
@@ -0,0 +1,66 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "512-element X-vector embeddings of speakers from CMU ARCTIC dataset.",
+			id: "Matthijs/cmu-arctic-xvectors",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "input.wav",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				filename: "label-0.wav",
+				type: "audio",
+			},
+			{
+				filename: "label-1.wav",
+				type: "audio",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The Signal-to-Noise ratio is the relationship between the target signal level and the background noise level. It is calculated as the logarithm of the target signal divided by the background noise, in decibels.",
+			id: "snri",
+		},
+		{
+			description:
+				"The Signal-to-Distortion ratio is the relationship between the target signal and the sum of noise, interference, and artifact errors",
+			id: "sdri",
+		},
+	],
+	models: [
+		{
+			description: "A solid model of audio source separation.",
+			id: "speechbrain/sepformer-wham",
+		},
+		{
+			description: "A speech enhancement model.",
+			id: "speechbrain/metricgan-plus-voicebank",
+		},
+	],
+	spaces: [
+		{
+			description: "An application for speech separation.",
+			id: "younver/speechbrain-speech-separation",
+		},
+		{
+			description: "An application for audio style transfer.",
+			id: "nakas/audio-diffusion_style_transfer",
+		},
+	],
+	summary:
+		"Audio-to-Audio is a family of tasks in which the input is an audio and the output is one or multiple generated audios. Some example tasks are speech enhancement and source separation.",
+	widgetModels: ["speechbrain/sepformer-wham"],
+	youtubeId: "iohj7nCCYoM",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/automatic-speech-recognition/about.md b/packages/tasks/src/automatic-speech-recognition/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..3871cba1c377a25e0d1041c24966748a50d2f5ed
--- /dev/null
+++ b/packages/tasks/src/automatic-speech-recognition/about.md
@@ -0,0 +1,87 @@
+## Use Cases
+
+### Virtual Speech Assistants
+
+Many edge devices have an embedded virtual assistant to interact better with the end users. These assistants rely on ASR models to recognize different voice commands and perform various tasks. For instance, you can ask your phone to dial a phone number, ask it a general question, or have it schedule a meeting.
+
+### Caption Generation
+
+A caption generation model takes audio as input and generates automatic captions through transcription, for live-streamed or recorded videos. This can help with content accessibility: for example, an audience watching a video that includes a non-native language can rely on captions to interpret the content. It can also help with information retention in online classes, making it easier to assimilate the material while reading along and taking notes.
+
+## Task Variants
+
+### Multilingual ASR
+
+Multilingual ASR models can convert audio inputs in multiple languages into transcripts. Some multilingual ASR models include [language identification](https://huggingface.co/tasks/audio-classification) blocks to improve performance.
+
+The use of multilingual ASR has become popular: maintaining just a single model for all languages can simplify the production pipeline. Take a look at [Whisper](https://huggingface.co/openai/whisper-large-v2) to get an idea of how a single model can process 100+ languages.
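+
+As a sketch, assuming a recent 🤗 Transformers release in which Whisper's `generate` accepts `language` and `task` keyword arguments, you can pin the language instead of relying on automatic detection:
+
+```python
+from transformers import pipeline
+
+asr = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
+# Whisper detects the spoken language automatically; here we force French.
+asr("sample.flac", generate_kwargs={"language": "french", "task": "transcribe"})
+```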
+
+## Inference
+
+The Hub contains around [9,000 ASR models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using the Inference API. Here is a simple code snippet to do exactly this:
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v2"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+```
+
+You can also use libraries such as [transformers](https://huggingface.co/models?library=transformers&pipeline_tag=automatic-speech-recognition&sort=downloads), [speechbrain](https://huggingface.co/models?library=speechbrain&pipeline_tag=automatic-speech-recognition&sort=downloads), [NeMo](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=nemo&sort=downloads) and [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=automatic-speech-recognition&sort=downloads) if you want to run inference yourself in just a few lines of code.
+
+```python
+from transformers import pipeline
+
+with open("sample.flac", "rb") as f:
+  data = f.read()
+
+pipe = pipeline("automatic-speech-recognition", "openai/whisper-large-v2")
+pipe("sample.flac")
+# {'text': "GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS"}
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to transcribe audio with JavaScript using models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.automaticSpeechRecognition({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "openai/whisper-large-v2",
+});
+```
+
+## Solving ASR for your own data
+
+We have some great news! You can fine-tune (transfer learning) a foundational speech model on a specific language without needing enormous amounts of data. Pretrained models such as Whisper, Wav2Vec2-MMS and HuBERT exist. [OpenAI's Whisper model](https://huggingface.co/openai/whisper-large-v2) is a large multilingual model trained on 680K hours of speech covering 100+ languages.
+
+The following detailed [blog post](https://huggingface.co/blog/fine-tune-whisper) shows how to fine-tune a pre-trained Whisper checkpoint on labeled data for ASR. With the right data and strategy, you can fine-tune a high-performing model on a free Google Colab instance too. We suggest reading the blog post for more info!
+
+## Hugging Face Whisper Event
+
+In December 2022, over 450 participants collaborated to fine-tune and share 600+ ASR Whisper models in 100+ different languages. You can compare these models on the event's speech recognition [leaderboard](https://huggingface.co/spaces/whisper-event/leaderboard?dataset=mozilla-foundation%2Fcommon_voice_11_0&config=ar&split=test).
+
+These events help democratize ASR for all languages, including low-resource languages. In addition to the trained models, the [event](https://github.com/huggingface/community-events/tree/main/whisper-fine-tuning-event) helps to build practical collaborative knowledge.
+
+## Useful Resources
+
+- [Fine-tuning MetaAI's MMS Adapter Models for Multi-Lingual ASR](https://huggingface.co/blog/mms_adapters)
+- [Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers](https://huggingface.co/blog/asr-chunking)
+- [Boosting Wav2Vec2 with n-grams in 🤗 Transformers](https://huggingface.co/blog/wav2vec2-with-ngram)
+- [ML for Audio Study Group - Intro to Audio and ASR Deep Dive](https://www.youtube.com/watch?v=D-MH6YjuIlE)
+- [Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters](https://arxiv.org/pdf/2007.03001.pdf)
+- An ASR toolkit made by [NVIDIA: NeMo](https://github.com/NVIDIA/NeMo) with code and pretrained models useful for new ASR models. Watch the [introductory video](https://www.youtube.com/embed/wBgpMf_KQVw) for an overview.
+- [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5)
+- [A guide on Fine-tuning Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper)
+- [Automatic speech recognition task guide](https://huggingface.co/docs/transformers/tasks/asr)
+- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
diff --git a/packages/tasks/src/automatic-speech-recognition/data.ts b/packages/tasks/src/automatic-speech-recognition/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..05d13e14cfa306df4000ebed85aee46660b886f1
--- /dev/null
+++ b/packages/tasks/src/automatic-speech-recognition/data.ts
@@ -0,0 +1,78 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "18,000 hours of multilingual audio-text dataset in 108 languages.",
+			id: "mozilla-foundation/common_voice_13_0",
+		},
+		{
+			description: "An English dataset with 1,000 hours of data.",
+			id: "librispeech_asr",
+		},
+		{
+			description: "High quality, multi-speaker audio data and their transcriptions in various languages.",
+			id: "openslr",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "input.flac",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				/// GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES I
+				label: "Transcript",
+				content: "Going along slushy country roads and speaking to damp audiences in...",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "wer",
+		},
+		{
+			description: "",
+			id: "cer",
+		},
+	],
+	models: [
+		{
+			description: "A powerful ASR model by OpenAI.",
+			id: "openai/whisper-large-v2",
+		},
+		{
+			description: "A good generic ASR model by MetaAI.",
+			id: "facebook/wav2vec2-base-960h",
+		},
+		{
+			description: "An end-to-end model that performs ASR and Speech Translation by MetaAI.",
+			id: "facebook/s2t-small-mustc-en-fr-st",
+		},
+	],
+	spaces: [
+		{
+			description: "A powerful general-purpose speech recognition application.",
+			id: "openai/whisper",
+		},
+		{
+			description: "Fastest speech recognition application.",
+			id: "sanchit-gandhi/whisper-jax",
+		},
+		{
+			description: "An application that transcribes speeches in YouTube videos.",
+			id: "jeffistyping/Youtube-Whisperer",
+		},
+	],
+	summary:
+		"Automatic Speech Recognition (ASR), also known as Speech to Text (STT), is the task of transcribing a given audio to text. It has many applications, such as voice user interfaces.",
+	widgetModels: ["openai/whisper-large-v2"],
+	youtubeId: "TksaY_FDgnk",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/const.ts b/packages/tasks/src/const.ts
new file mode 100644
index 0000000000000000000000000000000000000000..34fb9b24a4b3d92fef965a3490de09693f8bf584
--- /dev/null
+++ b/packages/tasks/src/const.ts
@@ -0,0 +1,59 @@
+import type { ModelLibraryKey } from "./modelLibraries";
+import type { PipelineType } from "./pipelines";
+
+/**
+ * Model libraries compatible with each ML task
+ */
+export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
+	"audio-classification": ["speechbrain", "transformers"],
+	"audio-to-audio": ["asteroid", "speechbrain"],
+	"automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
+	conversational: ["transformers"],
+	"depth-estimation": ["transformers"],
+	"document-question-answering": ["transformers"],
+	"feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
+	"fill-mask": ["transformers", "transformers.js"],
+	"graph-ml": ["transformers"],
+	"image-classification": ["keras", "timm", "transformers", "transformers.js"],
+	"image-segmentation": ["transformers", "transformers.js"],
+	"image-to-image": [],
+	"image-to-text": ["transformers.js"],
+	"video-classification": [],
+	"multiple-choice": ["transformers"],
+	"object-detection": ["transformers", "transformers.js"],
+	other: [],
+	"question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"],
+	robotics: [],
+	"reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"],
+	"sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"],
+	summarization: ["transformers", "transformers.js"],
+	"table-question-answering": ["transformers"],
+	"table-to-text": ["transformers"],
+	"tabular-classification": ["sklearn"],
+	"tabular-regression": ["sklearn"],
+	"tabular-to-text": ["transformers"],
+	"text-classification": ["adapter-transformers", "spacy", "transformers", "transformers.js"],
+	"text-generation": ["transformers", "transformers.js"],
+	"text-retrieval": [],
+	"text-to-image": [],
+	"text-to-speech": ["espnet", "tensorflowtts", "transformers"],
+	"text-to-audio": ["transformers"],
+	"text-to-video": [],
+	"text2text-generation": ["transformers", "transformers.js"],
+	"time-series-forecasting": [],
+	"token-classification": [
+		"adapter-transformers",
+		"flair",
+		"spacy",
+		"span-marker",
+		"stanza",
+		"transformers",
+		"transformers.js",
+	],
+	translation: ["transformers", "transformers.js"],
+	"unconditional-image-generation": [],
+	"visual-question-answering": [],
+	"voice-activity-detection": [],
+	"zero-shot-classification": ["transformers", "transformers.js"],
+	"zero-shot-image-classification": ["transformers.js"],
+};
diff --git a/packages/tasks/src/conversational/about.md b/packages/tasks/src/conversational/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2141ba20fbaa7c1093e3a2f03208d62e36b0ac6
--- /dev/null
+++ b/packages/tasks/src/conversational/about.md
@@ -0,0 +1,50 @@
+## Use Cases
+
+### Chatbot 💬
+
+Chatbots are used to have conversations instead of providing direct contact with a live human. They are used for customer service and sales, and can even be used to play games (see [ELIZA](https://en.wikipedia.org/wiki/ELIZA) from 1966 for one of the earliest examples).
+
+### Voice Assistants 🎙️
+
+Conversational response models are used as part of voice assistants to provide appropriate responses to voice-based queries.
+
+## Inference
+
+You can infer with conversational models using the 🤗 Transformers library's `conversational` pipeline. This pipeline takes a conversation prompt or a list of conversations and generates responses for each prompt. The models this pipeline can use have been fine-tuned on a multi-turn conversational task (see https://huggingface.co/models?filter=conversational for an up-to-date list of conversational models).
+
+```python
+from transformers import pipeline, Conversation
+converse = pipeline("conversational")
+
+conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
+conversation_2 = Conversation("What's the last book you have read?")
+converse([conversation_1, conversation_2])
+
+## Output:
+## Conversation 1
+## user >> Going to the movies tonight - any suggestions?
+## bot >> The Big Lebowski ,
+## Conversation 2
+## user >> What's the last book you have read?
+## bot >> The Last Question
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with conversational models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.conversational({
+	model: "facebook/blenderbot-400M-distill",
+	inputs: "Going to the movies tonight - any suggestions?",
+});
+```
+
+## Useful Resources
+
+- Learn how ChatGPT and InstructGPT work in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf)
+- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM)
+- [A guide on Dialog Agents](https://huggingface.co/blog/dialog-agents)
+
+This page was made possible thanks to the efforts of [Viraat Aryabumi](https://huggingface.co/viraat).
diff --git a/packages/tasks/src/conversational/data.ts b/packages/tasks/src/conversational/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..85c4057612b31883e21b208bedf235199055e721
--- /dev/null
+++ b/packages/tasks/src/conversational/data.ts
@@ -0,0 +1,66 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.",
+			id: "blended_skill_talk",
+		},
+		{
+			description:
+				"ConvAI is a dataset of human-to-bot conversations labeled for quality. This data can be used to train a metric for evaluating dialogue systems.",
+			id: "conv_ai_2",
+		},
+		{
+			description: "EmpatheticDialogues is a dataset of 25k conversations grounded in emotional situations.",
+			id: "empathetic_dialogues",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "Hey my name is Julien! How are you?",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Answer",
+				content: "Hi Julien! My name is Julia! I am well.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called “n-grams”. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. The score ranges from 0 to 1, where 1 means the generated sequence perfectly matched the reference and 0 means it did not match at all.",
+			id: "bleu",
+		},
+	],
+	models: [
+		{
+			description: "A distilled, faster and smaller version of the BlenderBot open-domain chatbot model.",
+			id: "facebook/blenderbot-400M-distill",
+		},
+		{
+			description:
+				"DialoGPT is a large-scale pretrained dialogue response generation model for multiturn conversations.",
+			id: "microsoft/DialoGPT-large",
+		},
+	],
+	spaces: [
+		{
+			description: "A chatbot based on Blender model.",
+			id: "EXFINITE/BlenderBot-UI",
+		},
+	],
+	summary:
+		"Conversational response modelling is the task of generating conversational text that is relevant, coherent and knowledgeable given a prompt. These models have applications in chatbots and as a part of voice assistants.",
+	widgetModels: ["facebook/blenderbot-400M-distill"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/depth-estimation/about.md b/packages/tasks/src/depth-estimation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..b83d60e24f1441129412ed1d4ebd562fec560453
--- /dev/null
+++ b/packages/tasks/src/depth-estimation/about.md
@@ -0,0 +1,36 @@
+## Use Cases
+
+Depth estimation models can be used to estimate the depth of different objects present in an image.
+
+### Estimation of Volumetric Information
+
+Depth estimation models are widely used to study the volumetric formation of objects present inside an image. This is an important use case in the domain of computer graphics.
+
+### 3D Representation
+
+Depth estimation models can also be used to develop a 3D representation from a 2D image.
+
+## Inference
+
+With the `transformers` library, you can use the `depth-estimation` pipeline to infer with depth estimation models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id, it will initialize with [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) by default. When calling the pipeline, you just need to specify a path, an HTTP link, or an image loaded in PIL. Additionally, you can find a comprehensive list of various depth estimation models at [this link](https://huggingface.co/models?pipeline_tag=depth-estimation).
+
+```python
+from transformers import pipeline
+
+estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
+result = estimator(images="http://images.cocodataset.org/val2017/000000039769.jpg")
+result
+
+# {'predicted_depth': tensor([[[ 6.3199,  6.3629,  6.4148,  ..., 10.4104, 10.5109, 10.3847],
+#           [ 6.3850,  6.3615,  6.4166,  ..., 10.4540, 10.4384, 10.4554],
+#           [ 6.3519,  6.3176,  6.3575,  ..., 10.4247, 10.4618, 10.4257],
+#           ...,
+#           [22.3772, 22.4624, 22.4227,  ..., 22.5207, 22.5593, 22.5293],
+#           [22.5073, 22.5148, 22.5114,  ..., 22.6604, 22.6344, 22.5871],
+#           [22.5176, 22.5275, 22.5218,  ..., 22.6282, 22.6216, 22.6108]]]),
+#  'depth': <PIL.Image.Image image mode=L size=640x480 at 0x7F1A8BFE5D90>}
+
+# You can visualize the result just by calling `result["depth"]`.
+```
+
+## Useful Resources
+
+- [Monocular depth estimation task guide](https://huggingface.co/docs/transformers/tasks/monocular_depth_estimation)
diff --git a/packages/tasks/src/depth-estimation/data.ts b/packages/tasks/src/depth-estimation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1a9b0d2a183028679b0f606e84fb07e16f40f8a6
--- /dev/null
+++ b/packages/tasks/src/depth-estimation/data.ts
@@ -0,0 +1,52 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data",
+			id: "sayakpaul/nyu_depth_v2",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "depth-estimation-input.jpg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "depth-estimation-output.png",
+				type: "img",
+			},
+		],
+	},
+	metrics: [],
+	models: [
+		{
+			// TO DO: write description
+			description: "Strong Depth Estimation model trained on 1.4 million images.",
+			id: "Intel/dpt-large",
+		},
+		{
+			// TO DO: write description
+			description: "Strong Depth Estimation model trained on the KITTI dataset.",
+			id: "vinvino02/glpn-kitti",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that predicts the depth of an image and then reconstructs the 3D model as voxels.",
+			id: "radames/dpt-depth-estimation-3d-voxels",
+		},
+		{
+			description: "An application that can estimate the depth in a given image.",
+			id: "keras-io/Monocular-Depth-Estimation",
+		},
+	],
+	summary: "Depth estimation is the task of predicting the depth of the objects present in an image.",
+	widgetModels: [""],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/document-question-answering/about.md b/packages/tasks/src/document-question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..528c29ec917ace00387344b09671e9a90fcc6e06
--- /dev/null
+++ b/packages/tasks/src/document-question-answering/about.md
@@ -0,0 +1,53 @@
+## Use Cases
+
+Document Question Answering models can be used to answer natural language questions about documents. Typically, document QA models consider textual, layout and potentially visual information. This is useful when the question requires some understanding of the visual aspects of the document.
+Nevertheless, certain document QA models can work without document images. Hence, the task is not limited to visually-rich documents; it also allows users to ask questions based on spreadsheets, text PDFs, and more!
+
+### Document Parsing
+
+One of the most popular use cases of document question answering models is the parsing of structured documents. For example, you can extract the name, address, and other information from a form. You can also use the model to extract information from a table, or even a resume.
+
+### Invoice Information Extraction
+
+Another very popular use case is invoice information extraction. For example, you can extract the invoice number, the invoice date, the total amount, the VAT number, and the invoice recipient.
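+
+A minimal sketch of this use case with the pipeline (LayoutLM-based checkpoints additionally need an OCR backend such as Tesseract installed; `invoice.png` is a placeholder):
+
+```python
+from transformers import pipeline
+
+qa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
+qa(image="invoice.png", question="What is the invoice number?")
+```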
+
+## Inference
+
+You can infer with Document QA models with the 🤗 Transformers library using the [`document-question-answering` pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline). If no model checkpoint is given, the pipeline will be initialized with [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa). This pipeline takes question(s) and document(s) as input, and returns the answer.  
+👉 Note that the question answering task solved here is extractive: the model extracts the answer from a context (the document).
+
+```python
+from transformers import pipeline
+from PIL import Image
+
+pipe = pipeline("document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa")
+
+question = "What is the purchase amount?"
+image = Image.open("your-document.png")
+
+pipe(image=image, question=question)
+
+## [{'answer': '20,000$'}]
+```
+
+## Useful Resources
+
+Would you like to learn more about Document QA? Awesome! Here are some curated resources that you may find helpful!
+
+- [Document Visual Question Answering (DocVQA) challenge](https://rrc.cvc.uab.es/?ch=17)
+- [DocVQA: A Dataset for Document Visual Question Answering](https://arxiv.org/abs/2007.00398) (Dataset paper)
+- [How to Build an Open-Domain Question Answering System?](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html) (Blog post)
+- [HuggingFace's Document Question Answering pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline)
+- [Github repo: DocQuery - Document Query Engine Powered by Large Language Models](https://github.com/impira/docquery)
+
+### Notebooks
+
+- [Fine-tuning Donut on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/0ea77f29d01217587d7e32a848f3691d9c15d6ab/Donut/DocVQA)
+- [Fine-tuning LayoutLMv2 on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/1b4bad710c41017d07a8f63b46a12523bfd2e835/LayoutLMv2/DocVQA)
+- [Accelerating Document AI](https://huggingface.co/blog/document-ai)
+
+### Documentation
+
+- [Document question answering task guide](https://huggingface.co/docs/transformers/tasks/document_question_answering)
+
+The contents of this page are contributed by [Eliott Zemour](https://huggingface.co/eliolio) and reviewed by [Kwadwo Agyapon-Ntra](https://huggingface.co/KayO) and [Ankur Goyal](https://huggingface.co/ankrgyl).
diff --git a/packages/tasks/src/document-question-answering/data.ts b/packages/tasks/src/document-question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..275173fa873a8eea24e6ddb04534a1a6758d16d2
--- /dev/null
+++ b/packages/tasks/src/document-question-answering/data.ts
@@ -0,0 +1,70 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description:
+				"Dataset from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry Documents Library.",
+			id: "eliolio/docvqa",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Question",
+				content: "What is the idea behind the consumer relations efficiency team?",
+				type: "text",
+			},
+			{
+				filename: "document-question-answering-input.png",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				label: "Answer",
+				content: "Balance cost efficiency with quality customer service",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein Similarity (ANLS). This metric is robust to character recognition errors and compares the predicted answer with the ground truth answer.",
+			id: "anls",
+		},
+		{
+			description:
+				"Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0.",
+			id: "exact-match",
+		},
+	],
+	models: [
+		{
+			description: "A LayoutLM model for the document QA task, fine-tuned on DocVQA and SQuAD2.0.",
+			id: "impira/layoutlm-document-qa",
+		},
+		{
+			description: "A special model for OCR-free Document QA task. Donut model fine-tuned on DocVQA.",
+			id: "naver-clova-ix/donut-base-finetuned-docvqa",
+		},
+	],
+	spaces: [
+		{
+			description: "A robust document question answering application.",
+			id: "impira/docquery",
+		},
+		{
+			description: "An application that can answer questions from invoices.",
+			id: "impira/invoices",
+		},
+	],
+	summary:
+		"Document Question Answering (also known as Document Visual Question Answering) is the task of answering questions on document images. Document question answering models take a (document, question) pair as input and return an answer in natural language. Models usually rely on multi-modal features, combining text, position of words (bounding-boxes) and image.",
+	widgetModels: ["impira/layoutlm-document-qa"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/feature-extraction/about.md b/packages/tasks/src/feature-extraction/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..60c7c7ed33c16ce43962edf2d3af6f0f963f6508
--- /dev/null
+++ b/packages/tasks/src/feature-extraction/about.md
@@ -0,0 +1,34 @@
+## About the Task
+
+Feature extraction is the task of building features intended to be informative from a given dataset,
+facilitating the subsequent learning and generalization steps in various domains of machine learning.
+
+## Use Cases
+
+Feature extraction can be used to do transfer learning in natural language processing, computer vision and audio models.
+
+## Inference
+
+#### Feature Extraction
+
+```python
+from transformers import pipeline
+
+checkpoint = "facebook/bart-base"
+feature_extractor = pipeline("feature-extraction", framework="pt", model=checkpoint)
+text = "Transformers is an awesome library!"
+
+# The pipeline returns one feature vector per token:
+# tensor([[[ 2.5834,  2.7571,  0.9024,  ...,  1.5036, -0.0435, -0.8603],
+#          [-1.2850, -1.0094, -2.0826,  ...,  1.5993, -0.9017,  0.6426],
+#          [ 0.9082,  0.3896, -0.6843,  ...,  0.7061,  0.6517,  1.0550],
+#          ...,
+#          [ 0.6919, -1.1946,  0.2438,  ...,  1.3646, -1.8661, -0.1642],
+#          [-0.1701, -2.0019, -0.4223,  ...,  0.3680, -1.9704, -0.0068],
+#          [ 0.2520, -0.6869, -1.0582,  ...,  0.5198, -2.2106,  0.4547]]])
+# Averaging along the sequence dimension reduces this to a single
+# 768-dimensional embedding for the whole text.
+feature_extractor(text, return_tensors="pt")[0].numpy().mean(axis=0)
+```
+
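+Building on the pipeline above, a common use of these embeddings is semantic similarity. Here is a minimal sketch (the `embed` helper is illustrative and reuses the `feature_extractor` defined above):
+
+```python
+import numpy as np
+
+def embed(text):
+    # Mean-pool the token features into a single 768-dimensional vector.
+    return feature_extractor(text, return_tensors="pt")[0].numpy().mean(axis=0)
+
+a = embed("Transformers is an awesome library!")
+b = embed("I really enjoy using this library.")
+cosine_similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
+```
+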
+## Useful resources
+
+- [Documentation for feature extractor of 🤗Transformers](https://huggingface.co/docs/transformers/main_classes/feature_extractor)
diff --git a/packages/tasks/src/feature-extraction/data.ts b/packages/tasks/src/feature-extraction/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..fe5f1785b92b078fe03f7c92dba5644120eeafe5
--- /dev/null
+++ b/packages/tasks/src/feature-extraction/data.ts
@@ -0,0 +1,54 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"Wikipedia dataset containing cleaned articles of all languages. Can be used to train `feature-extraction` models.",
+			id: "wikipedia",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "India, officially the Republic of India, is a country in South Asia.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				table: [
+					["Dimension 1", "Dimension 2", "Dimension 3"],
+					["2.583383083343506", "2.757075071334839", "0.9023529887199402"],
+					["8.29393482208252", "1.1071064472198486", "2.03399395942688"],
+					["-0.7754912972450256", "-1.647324562072754", "-0.6113331913948059"],
+					["0.07087723910808563", "1.5942802429199219", "1.4610432386398315"],
+				],
+				type: "tabular",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "",
+		},
+	],
+	models: [
+		{
+			description: "A powerful feature extraction model for natural language processing tasks.",
+			id: "facebook/bart-base",
+		},
+		{
+			description: "A strong feature extraction model for coding tasks.",
+			id: "microsoft/codebert-base",
+		},
+	],
+	spaces: [],
+	summary:
+		"Feature extraction refers to the process of transforming raw data into numerical features that can be processed while preserving the information in the original dataset.",
+	widgetModels: ["facebook/bart-base"],
+};
+
+export default taskData;
diff --git a/packages/tasks/src/fill-mask/about.md b/packages/tasks/src/fill-mask/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..4fabd3cf6d06d8ba9e676eb1f637c5f688b456fb
--- /dev/null
+++ b/packages/tasks/src/fill-mask/about.md
@@ -0,0 +1,51 @@
+## Use Cases
+
+### Domain Adaptation 👩‍⚕️
+
+Masked language models do not require labelled data! They are trained by masking a couple of words in sentences, and the model is expected to guess the masked words. This makes them very practical!
+
+For example, masked language modeling is used to train large models for domain-specific problems. If you have to work on a domain-specific task, such as retrieving information from medical research papers, you can train a masked language model using those papers. 📄
+
+The resulting model has a statistical understanding of the language used in medical research papers, and can be further trained in a process called fine-tuning to solve different tasks, such as [Text Classification](/tasks/text-classification) or [Question Answering](/tasks/question-answering) to build a medical research papers information extraction system. 👩‍⚕️ Pre-training on domain-specific data tends to yield better results (see [this paper](https://arxiv.org/abs/2007.15779) for an example).
+
+If you don't have the data to train a masked language model, you can also use an existing [domain-specific masked language model](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) from the Hub and fine-tune it with your smaller task dataset. That's the magic of Open Source and sharing your work! 🎉
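+
+For example, querying such a domain-specific model is a one-liner with the `fill-mask` pipeline (the example sentence is made up; BERT-style models use `[MASK]` as the mask token):
+
+```python
+from transformers import pipeline
+
+classifier = pipeline(
+    "fill-mask",
+    model="microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext",
+)
+classifier("The patient was prescribed 100mg of [MASK].")
+```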
+
+## Inference with Fill-Mask Pipeline
+
+You can use the 🤗 Transformers library's `fill-mask` pipeline to do inference with masked language models. If a model name is not provided, the pipeline will be initialized with [distilroberta-base](/distilroberta-base). You can provide masked text, and it will return a list of possible mask values ranked according to the score.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("fill-mask")
+classifier("Paris is the <mask> of France.")
+
+# [{'score': 0.7, 'sequence': 'Paris is the capital of France.'},
+# {'score': 0.2, 'sequence': 'Paris is the birthplace of France.'},
+# {'score': 0.1, 'sequence': 'Paris is the heart of France.'}]
+```
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that can be helpful to you!
+
+- [Course Chapter on Fine-tuning a Masked Language Model](https://huggingface.co/course/chapter7/3?fw=pt)
+- [Workshop on Pretraining Language Models and CodeParrot](https://www.youtube.com/watch?v=ExUR7w6xe94)
+- [BERT 101: State Of The Art NLP Model Explained](https://huggingface.co/blog/bert-101)
+- [Nyströmformer: Approximating self-attention in linear time and memory via the Nyström method](https://huggingface.co/blog/nystromformer)
+
+### Notebooks
+
+- [Pre-training an MLM for JAX/Flax](https://github.com/huggingface/notebooks/blob/master/examples/masked_language_modeling_flax.ipynb)
+- [Masked language modeling in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb)
+- [Masked language modeling in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling)
+
+### Documentation
+
+- [Masked language modeling task guide](https://huggingface.co/docs/transformers/tasks/masked_language_modeling)
diff --git a/packages/tasks/src/fill-mask/data.ts b/packages/tasks/src/fill-mask/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4e8204b159ff19257fe19877947236a7e29442bb
--- /dev/null
+++ b/packages/tasks/src/fill-mask/data.ts
@@ -0,0 +1,79 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A common dataset that is used to train models for many languages.",
+			id: "wikipedia",
+		},
+		{
+			description: "A large English dataset with text crawled from the web.",
+			id: "c4",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "The <mask> barked at me",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "wolf",
+						score: 0.487,
+					},
+					{
+						label: "dog",
+						score: 0.061,
+					},
+					{
+						label: "cat",
+						score: 0.058,
+					},
+					{
+						label: "fox",
+						score: 0.047,
+					},
+					{
+						label: "squirrel",
+						score: 0.025,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words",
+			id: "cross_entropy",
+		},
+		{
+			description:
+				"Perplexity is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance",
+			id: "perplexity",
+		},
+	],
+	models: [
+		{
+			description: "A faster and smaller model than the famous BERT model.",
+			id: "distilbert-base-uncased",
+		},
+		{
+			description: "A multilingual model trained on 100 languages.",
+			id: "xlm-roberta-base",
+		},
+	],
+	spaces: [],
+	summary:
+		"Masked language modeling is the task of masking some of the words in a sentence and predicting which words should replace those masks. These models are useful when we want to get a statistical understanding of the language the model is trained in.",
+	widgetModels: ["distilroberta-base"],
+	youtubeId: "mqElG5QJWUg",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-classification/about.md b/packages/tasks/src/image-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..593f3b1ddd5d61ff155ebddeb0ce339adcff4e85
--- /dev/null
+++ b/packages/tasks/src/image-classification/about.md
@@ -0,0 +1,50 @@
+## Use Cases
+
+Image classification models can be used when we only care about what an image contains, not about the location or shape of specific object instances.
+
+### Keyword Classification
+
+Image classification models are used widely in stock photography to assign each image a keyword.
+
+### Image Search
+
+Models trained in image classification can improve user experience by organizing and categorizing photo galleries on the phone or in the cloud, based on multiple keywords or tags.
+
+## Inference
+
+With the `transformers` library, you can use the `image-classification` pipeline to infer with image classification models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id, it will initialize with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) by default. When calling the pipeline you just need to specify a path, an HTTP link or an image loaded in PIL. You can also provide a `top_k` parameter that determines how many results it should return.
+
+```python
+from transformers import pipeline
+
+clf = pipeline("image-classification")
+clf("path_to_a_cat_image")
+# [{'label': 'tabby cat', 'score': 0.731},
+# ...
+# ]
+```
+
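+The paragraph above also mentions the `top_k` parameter. A minimal sketch of using it with the same pipeline looks like this (the labels and scores shown are illustrative):
+
+```python
+# restrict the output to the 3 highest-scoring labels
+clf("path_to_a_cat_image", top_k=3)
+# [{'label': 'tabby cat', 'score': 0.731},
+#  {'label': 'tiger cat', 'score': 0.121},
+#  {'label': 'Egyptian cat', 'score': 0.072}]
+```
+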
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to classify images using models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageClassification({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "microsoft/resnet-50",
+});
+```
+
+## Useful Resources
+
+- [Let's Play Pictionary with Machine Learning!](https://www.youtube.com/watch?v=LS9Y2wDVI0k)
+- [Fine-Tune ViT for Image Classification with 🤗Transformers](https://huggingface.co/blog/fine-tune-vit)
+- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8)
+- [Computer Vision Study Group: Swin Transformer](https://www.youtube.com/watch?v=Ngikt-K1Ecc)
+- [Computer Vision Study Group: Masked Autoencoders Paper Walkthrough](https://www.youtube.com/watch?v=Ngikt-K1Ecc)
+- [Image classification task guide](https://huggingface.co/docs/transformers/tasks/image_classification)
+
+### Creating your own image classifier in just a few minutes
+
+With [HuggingPics](https://github.com/nateraw/huggingpics), you can fine-tune Vision Transformers for anything using images found on the web. This project downloads images of classes defined by you, trains a model, and pushes it to the Hub. You even get to try out the model directly with a working widget in the browser, ready to be shared with all your friends!
diff --git a/packages/tasks/src/image-classification/data.ts b/packages/tasks/src/image-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4dcbff4f17ba2f5811d7a1b3421916a5f3aa83aa
--- /dev/null
+++ b/packages/tasks/src/image-classification/data.ts
@@ -0,0 +1,88 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "Benchmark dataset used for image classification with images that belong to 100 classes.",
+			id: "cifar100",
+		},
+		{
+			// TODO write proper description
+			description: "Dataset consisting of images of garments.",
+			id: "fashion_mnist",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-classification-input.jpeg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Egyptian cat",
+						score: 0.514,
+					},
+					{
+						label: "Tabby cat",
+						score: 0.193,
+					},
+					{
+						label: "Tiger cat",
+						score: 0.068,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "The proportion of predicted labels that match the true labels",
+			id: "accuracy",
+		},
+		{
+			description: "The proportion of actual positive examples that the model correctly identified",
+			id: "recall",
+		},
+		{
+			description: "The proportion of predicted positive examples that are actually positive",
+			id: "precision",
+		},
+		{
+			description: "The harmonic mean of precision and recall",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "A strong image classification model.",
+			id: "google/vit-base-patch16-224",
+		},
+		{
+			description: "A robust image classification model.",
+			id: "facebook/deit-base-distilled-patch16-224",
+		},
+		{
+			description: "A strong image classification model.",
+			id: "facebook/convnext-large-224",
+		},
+	],
+	spaces: [
+		{
+			// TODO: write description
+			description: "An application that classifies what a given image is about.",
+			id: "nielsr/perceiver-image-classification",
+		},
+	],
+	summary:
+		"Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.",
+	widgetModels: ["google/vit-base-patch16-224"],
+	youtubeId: "tjAIM7BOYhw",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-segmentation/about.md b/packages/tasks/src/image-segmentation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..3f26fb8caef4ac8668af9f1f4863c7deb4933e21
--- /dev/null
+++ b/packages/tasks/src/image-segmentation/about.md
@@ -0,0 +1,63 @@
+## Use Cases
+
+### Autonomous Driving
+
+Segmentation models are used to identify road patterns such as lanes and obstacles for safer driving.
+
+### Background Removal
+
+Image Segmentation models are used in cameras to erase the background of certain objects and apply filters to them.
+
+### Medical Imaging
+
+Image Segmentation models are used to distinguish organs or tissues, improving medical imaging workflows. Models are used to segment dental instances, analyze X-Ray scans or even segment cells for pathological diagnosis. This [dataset](https://github.com/v7labs/covid-19-xray-dataset) contains images of lungs of healthy patients and patients with COVID-19 segmented with masks. Another [segmentation dataset](https://ivdm3seg.weebly.com/data.html) contains segmented MRI data of the lower spine to analyze the effect of spaceflight simulation.
+
+## Task Variants
+
+### Semantic Segmentation
+
+Semantic Segmentation is the task of segmenting parts of an image that belong to the same class. Semantic Segmentation models make predictions for each pixel and return the probabilities of the classes for each pixel. These models are evaluated on Mean Intersection Over Union (Mean IoU).
+
+### Instance Segmentation
+
+Instance Segmentation is the variant of Image Segmentation where every distinct object is segmented, instead of one segment per class.
+
+### Panoptic Segmentation
+
+Panoptic Segmentation is the Image Segmentation task that segments the image both by instance and by class, assigning each pixel both a class label and an instance identity.
+
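+As a concrete illustration of the Mean IoU metric mentioned above, below is a minimal NumPy sketch that computes the per-class IoU of two label masks and averages it (the class maps are made up for the example):
+
+```python
+import numpy as np
+
+def mean_iou(pred, target, num_classes):
+	# IoU per class = |pred ∩ target| / |pred ∪ target|, averaged over the classes present
+	ious = []
+	for c in range(num_classes):
+		intersection = np.logical_and(pred == c, target == c).sum()
+		union = np.logical_or(pred == c, target == c).sum()
+		if union > 0:
+			ious.append(intersection / union)
+	return float(np.mean(ious))
+
+pred = np.array([[0, 0, 1], [1, 1, 2], [2, 2, 2]])
+target = np.array([[0, 0, 1], [1, 1, 1], [2, 2, 2]])
+print(mean_iou(pred, target, num_classes=3))  # ≈ 0.83
+```
+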
+## Inference
+
+You can infer with Image Segmentation models using the `image-segmentation` pipeline. You need to install [timm](https://github.com/rwightman/pytorch-image-models) first.
+
+```python
+!pip install timm
+from transformers import pipeline
+
+model = pipeline("image-segmentation")
+model("cat.png")
+# [{'label': 'cat',
+#   'mask': mask_code,
+#   'score': 0.999}
+# ...]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image segmentation models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageSegmentation({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "facebook/detr-resnet-50-panoptic",
+});
+```
+
+## Useful Resources
+
+Would you like to learn more about image segmentation? Great! Here you can find some curated resources that you may find helpful!
+
+- [Fine-Tune a Semantic Segmentation Model with a Custom Dataset](https://huggingface.co/blog/fine-tune-segformer)
+- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8)
+- [A Guide on Universal Image Segmentation with Mask2Former and OneFormer](https://huggingface.co/blog/mask2former)
+- [Zero-shot image segmentation with CLIPSeg](https://huggingface.co/blog/clipseg-zero-shot)
+- [Semantic segmentation task guide](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
diff --git a/packages/tasks/src/image-segmentation/data.ts b/packages/tasks/src/image-segmentation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c6bb835e79575d57b8658b6ae15bd54ec3a7a9b6
--- /dev/null
+++ b/packages/tasks/src/image-segmentation/data.ts
@@ -0,0 +1,99 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Scene segmentation dataset.",
+			id: "scene_parse_150",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-segmentation-input.jpeg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "image-segmentation-output.png",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Average Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for each semantic class separately",
+			id: "Average Precision",
+		},
+		{
+			description: "Mean Average Precision (mAP) is the overall average of the AP values",
+			id: "Mean Average Precision",
+		},
+		{
+			description:
+				"Intersection over Union (IoU) is the overlap of segmentation masks. Mean IoU is the average of the IoU of all semantic classes",
+			id: "Mean Intersection over Union",
+		},
+		{
+			description: "APα is the Average Precision at a given IoU threshold α, for example, AP50 and AP75",
+			id: "APα",
+		},
+	],
+	models: [
+		{
+			// TODO: write description
+			description: "Solid panoptic segmentation model trained on the COCO 2017 benchmark dataset.",
+			id: "facebook/detr-resnet-50-panoptic",
+		},
+		{
+			description: "Semantic segmentation model trained on ADE20k benchmark dataset.",
+			id: "microsoft/beit-large-finetuned-ade-640-640",
+		},
+		{
+			description: "Semantic segmentation model trained on ADE20k benchmark dataset with 512x512 resolution.",
+			id: "nvidia/segformer-b0-finetuned-ade-512-512",
+		},
+		{
+			description: "Semantic segmentation model trained on the Cityscapes dataset.",
+			id: "facebook/mask2former-swin-large-cityscapes-semantic",
+		},
+		{
+			description: "Panoptic segmentation model trained on the COCO (common objects) dataset.",
+			id: "facebook/mask2former-swin-large-coco-panoptic",
+		},
+	],
+	spaces: [
+		{
+			description: "A semantic segmentation application that can predict unseen instances out of the box.",
+			id: "facebook/ov-seg",
+		},
+		{
+			description: "One of the strongest segmentation applications.",
+			id: "jbrinkma/segment-anything",
+		},
+		{
+			description: "A semantic segmentation application that predicts human silhouettes.",
+			id: "keras-io/Human-Part-Segmentation",
+		},
+		{
+			description: "An instance segmentation application to predict neuronal cell types from microscopy images.",
+			id: "rashmi/sartorius-cell-instance-segmentation",
+		},
+		{
+			description: "An application that segments videos.",
+			id: "ArtGAN/Segment-Anything-Video",
+		},
+		{
+			description: "A panoptic segmentation application built for outdoor environments.",
+			id: "segments/panoptic-segment-anything",
+		},
+	],
+	summary:
+		"Image Segmentation divides an image into segments where each pixel in the image is mapped to an object. This task has multiple variants such as instance segmentation, panoptic segmentation and semantic segmentation.",
+	widgetModels: ["facebook/detr-resnet-50-panoptic"],
+	youtubeId: "dKE8SIt9C-w",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-to-image/about.md b/packages/tasks/src/image-to-image/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..d133bafcee0a36643ef43b3bb041d198bafd6934
--- /dev/null
+++ b/packages/tasks/src/image-to-image/about.md
@@ -0,0 +1,79 @@
+## Use Cases
+
+### Style transfer
+
+One of the most popular use cases of image-to-image models is style transfer. Style transfer models can convert an ordinary photograph into a painting in the style of a famous painter.
+
+## Task Variants
+
+### Image inpainting
+
+Image inpainting is widely used in photo editing to remove unwanted objects, such as poles, wires or sensor dust; see the sketch at the end of this section.
+
+### Image colorization
+
+Old black-and-white images can be brought to life using an image colorization model.
+
+### Super Resolution
+
+Super resolution models increase the resolution of an image, allowing for higher quality viewing and printing.
+
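+As a minimal sketch of the inpainting variant above, the snippet below uses the 🧨 Diffusers inpainting pipeline (the file names and prompt are illustrative, and the mask is assumed to be white where content should be replaced):
+
+```python
+import torch
+from diffusers import StableDiffusionInpaintPipeline
+from diffusers.utils import load_image
+
+pipe = StableDiffusionInpaintPipeline.from_pretrained(
+	"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+
+# the mask is white where the unwanted object should be replaced
+image = load_image("photo_with_pole.png")
+mask = load_image("pole_mask.png")
+
+result = pipe(prompt="clear blue sky", image=image, mask_image=mask).images[0]
+result.save("inpainted.png")
+```
+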
+## Inference
+
+You can use the image-to-image pipelines in the 🧨 Diffusers library to easily run image-to-image models. See an example with `StableDiffusionImg2ImgPipeline` below.
+
+```python
+import torch
+from PIL import Image
+from diffusers import StableDiffusionImg2ImgPipeline
+
+model_id_or_path = "runwayml/stable-diffusion-v1-5"
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+init_image = Image.open("mountains_image.jpeg").convert("RGB").resize((768, 512))
+prompt = "A fantasy landscape, trending on artstation"
+
+images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
+images[0].save("fantasy_landscape.png")
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-image models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageToImage({
+	data: await (await fetch("image")).blob(),
+	model: "timbrooks/instruct-pix2pix",
+	parameters: {
+		prompt: "Deblur this image",
+	},
+});
+```
+
+## ControlNet
+
+Controlling the outputs of diffusion models with only a text prompt is a challenging problem. ControlNet is a type of neural network that provides image-based control to diffusion models. These controls can be edges or landmarks in an image.
+
+Many ControlNet models were trained in our community event, the JAX Diffusers sprint. You can see the full list of available ControlNet models [here](https://huggingface.co/spaces/jax-diffusers-event/leaderboard).
+
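+As a minimal sketch of image-based control with 🧨 Diffusers, the snippet below plugs a Canny-edge ControlNet into a Stable Diffusion pipeline (the control image file name and prompt are illustrative, and the edge map is assumed to be pre-computed):
+
+```python
+import torch
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers.utils import load_image
+
+# load a ControlNet conditioned on Canny edges and attach it to Stable Diffusion
+controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+	"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+
+# the edge map guides the structure of the generated image
+canny_image = load_image("canny_edges.png")
+image = pipe("a futuristic city street", image=canny_image).images[0]
+image.save("controlnet_output.png")
+```
+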
+## Most Used Model for the Task
+
+Pix2Pix is a popular model used for image-to-image translation tasks. It is based on a conditional GAN (generative adversarial network), where a 2D image is given as input instead of a noise vector. More information about Pix2Pix can be found on the [project page](https://phillipi.github.io/pix2pix/), which links to the associated paper and GitHub repository.
+
+The images below show some of the examples shared in the paper that can be obtained using Pix2Pix. The model can be applied to a variety of cases. It is capable of relatively simple transformations, e.g. converting a grayscale image to its colored version. More importantly, it can generate realistic pictures from rough sketches (see the purse example) or from painting-like images (see the street and facade examples below).
+
+![Examples](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/image-to-image/pix2pix_examples.jpg)
+
+## Useful Resources
+
+- [Train your ControlNet with diffusers 🧨](https://huggingface.co/blog/train-your-controlnet)
+- [Ultra fast ControlNet with 🧨 Diffusers](https://huggingface.co/blog/controlnet)
+
+## References
+
+[1] P. Isola, J. -Y. Zhu, T. Zhou and A. A. Efros, "Image-to-Image Translation with Conditional Adversarial Networks," 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 5967-5976, doi: 10.1109/CVPR.2017.632.
+
+This page was made possible thanks to the efforts of [Paul Gafton](https://github.com/Paul92) and [Osman Alenbey](https://huggingface.co/osman93).
diff --git a/packages/tasks/src/image-to-image/data.ts b/packages/tasks/src/image-to-image/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..9688dc249e923dc55661e30ece51c39e7e04510f
--- /dev/null
+++ b/packages/tasks/src/image-to-image/data.ts
@@ -0,0 +1,101 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Synthetic dataset, for image relighting",
+			id: "VIDIT",
+		},
+		{
+			description: "Multiple images of celebrities, used for facial expression translation",
+			id: "huggan/CelebA-faces",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-to-image-input.jpeg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "image-to-image-output.png",
+				type: "img",
+			},
+		],
+	},
+	isPlaceholder: false,
+	metrics: [
+		{
+			description:
+				"Peak Signal-to-Noise Ratio (PSNR) is the ratio between the maximum possible pixel value and the power of the noise distorting the image. Measured in dB, a higher value indicates higher fidelity.",
+			id: "PSNR",
+		},
+		{
+			description:
+				"Structural Similarity Index (SSIM) is a perceptual metric which compares the luminance, contrast and structure of two images. The values of SSIM range between -1 and 1, and higher values indicate closer resemblance to the original image.",
+			id: "SSIM",
+		},
+		{
+			description:
+				"Inception Score (IS) is an analysis of the labels predicted by an image classification model when presented with a sample of the generated images.",
+			id: "IS",
+		},
+	],
+	models: [
+		{
+			description: "A model that enhances images captured in low light conditions.",
+			id: "keras-io/low-light-image-enhancement",
+		},
+		{
+			description: "A model that increases the resolution of an image.",
+			id: "keras-io/super-resolution",
+		},
+		{
+			description:
+				"A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.",
+			id: "lambdalabs/sd-image-variations-diffusers",
+		},
+		{
+			description: "A model that generates images based on segments in the input image and the text prompt.",
+			id: "mfidabel/controlnet-segment-anything",
+		},
+		{
+			description: "A model that takes an image and an instruction to edit the image.",
+			id: "timbrooks/instruct-pix2pix",
+		},
+	],
+	spaces: [
+		{
+			description: "Image enhancer application for low light.",
+			id: "keras-io/low-light-image-enhancement",
+		},
+		{
+			description: "Style transfer application.",
+			id: "keras-io/neural-style-transfer",
+		},
+		{
+			description: "An application that generates images based on segment control.",
+			id: "mfidabel/controlnet-segment-anything",
+		},
+		{
+			description: "Image generation application that takes image control and text prompt.",
+			id: "hysts/ControlNet",
+		},
+		{
+			description: "Colorize any image using this app.",
+			id: "ioclab/brightness-controlnet",
+		},
+		{
+			description: "Edit images with instructions.",
+			id: "timbrooks/instruct-pix2pix",
+		},
+	],
+	summary:
+		"Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. A wide range of image manipulations and enhancements is possible with image-to-image models.",
+	widgetModels: ["lllyasviel/sd-controlnet-canny"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-to-text/about.md b/packages/tasks/src/image-to-text/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..a209ae22bc5073bc86f9da2a998b71b28c63ecf1
--- /dev/null
+++ b/packages/tasks/src/image-to-text/about.md
@@ -0,0 +1,65 @@
+## Use Cases
+
+### Image Captioning
+
+Image Captioning is the process of generating a textual description of an image.
+This can help visually impaired people understand what's happening in their surroundings.
+
+### Optical Character Recognition (OCR)
+
+OCR models convert the text present in an image, e.g. a scanned document, into machine-readable text.
+
+## Pix2Struct
+
+Pix2Struct is a state-of-the-art model built and released by Google AI. The model itself has to be fine-tuned on a downstream task before it can be used. These tasks include captioning UI components, captioning images that contain text, visual question answering on infographics, charts, scientific diagrams, and more. You can find fine-tuned Pix2Struct checkpoints among the recommended models on this page.
+
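+As a minimal sketch of using one such fine-tuned checkpoint, the snippet below captions an image that contains text with `google/pix2struct-textcaps-base` (the image file name is illustrative):
+
+```python
+from PIL import Image
+from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
+
+processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
+model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
+
+# encode the image and generate a caption for it
+image = Image.open("sign.jpeg").convert("RGB")
+inputs = processor(images=image, return_tensors="pt")
+predictions = model.generate(**inputs)
+print(processor.decode(predictions[0], skip_special_tokens=True))
+```
+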
+## Inference
+
+### Image Captioning
+
+You can use the 🤗 Transformers library's `image-to-text` pipeline to generate a caption for an image input.
+
+```python
+from transformers import pipeline
+
+captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+captioner("https://huggingface.co/datasets/Narsil/image_dummy/resolve/main/parrots.png")
+## [{'generated_text': 'two birds are standing next to each other '}]
+```
+
+### OCR
+
+This code snippet uses Microsoft’s TrOCR, an encoder-decoder model consisting of an image Transformer encoder and a text Transformer decoder for state-of-the-art optical character recognition (OCR) on single-text line images.
+
+```python
+from PIL import Image
+from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+
+processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
+
+# the processor expects an actual image, not a file path
+image = Image.open("image.jpeg").convert("RGB")
+pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+generated_ids = model.generate(pixel_values)
+generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-text models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageToText({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "Salesforce/blip-image-captioning-base",
+});
+```
+
+## Useful Resources
+
+- [Image Captioning](https://huggingface.co/docs/transformers/main/en/tasks/image_captioning)
+- [Image captioning use case](https://blog.google/outreach-initiatives/accessibility/get-image-descriptions/)
+- [Train Image Captioning model on your dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb)
+- [Train OCR model on your dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR)
+
+This page was made possible thanks to the efforts of [Sukesh Perla](https://huggingface.co/hitchhiker3010) and [Johannes Kolbe](https://huggingface.co/johko).
diff --git a/packages/tasks/src/image-to-text/data.ts b/packages/tasks/src/image-to-text/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6a838ebead9cf02e8104151e88d419d7921581e9
--- /dev/null
+++ b/packages/tasks/src/image-to-text/data.ts
@@ -0,0 +1,86 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "Dataset of 12M image-text pairs from Reddit.",
+			id: "red_caps",
+		},
+		{
+			// TODO write proper description
+			description: "Dataset of 3.3M images and captions collected by Google.",
+			id: "conceptual_captions",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "savanna.jpg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				label: "Detailed description",
+				content: "a herd of giraffes and zebras grazing in a field",
+				type: "text",
+			},
+		],
+	},
+	metrics: [],
+	models: [
+		{
+			description: "A robust image captioning model.",
+			id: "Salesforce/blip-image-captioning-large",
+		},
+		{
+			description: "A strong image captioning model.",
+			id: "nlpconnect/vit-gpt2-image-captioning",
+		},
+		{
+			description: "A strong optical character recognition model.",
+			id: "microsoft/trocr-base-printed",
+		},
+		{
+			description: "A strong visual question answering model for scientific diagrams.",
+			id: "google/pix2struct-ai2d-base",
+		},
+		{
+			description: "A strong captioning model for UI components.",
+			id: "google/pix2struct-widget-captioning-base",
+		},
+		{
+			description: "A captioning model for images that contain text.",
+			id: "google/pix2struct-textcaps-base",
+		},
+	],
+	spaces: [
+		{
+			description: "A robust image captioning application.",
+			id: "flax-community/image-captioning",
+		},
+		{
+			description: "An application that transcribes handwritings into text.",
+			id: "nielsr/TrOCR-handwritten",
+		},
+		{
+			description: "An application that can caption images and answer questions about a given image.",
+			id: "Salesforce/BLIP",
+		},
+		{
+			description: "An application that can caption images and answer questions with a conversational agent.",
+			id: "Salesforce/BLIP2",
+		},
+		{
+			description: "An image captioning application that demonstrates the effect of noise on captions.",
+			id: "johko/capdec-image-captioning",
+		},
+	],
+	summary:
+		"Image-to-text models output text from a given image. Image captioning and optical character recognition are among the most common applications of image-to-text models.",
+	widgetModels: ["Salesforce/blip-image-captioning-base"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/index.ts b/packages/tasks/src/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d45f5d5410ce84b54b7646164aadb1f18370051a
--- /dev/null
+++ b/packages/tasks/src/index.ts
@@ -0,0 +1,13 @@
+export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./Types";
+export { TASKS_DATA } from "./tasksData";
+export {
+	PIPELINE_DATA,
+	PIPELINE_TYPES,
+	type PipelineType,
+	type PipelineData,
+	type Modality,
+	MODALITIES,
+	MODALITY_LABELS,
+} from "./pipelines";
+export { ModelLibrary } from "./modelLibraries";
+export type { ModelLibraryKey } from "./modelLibraries";
diff --git a/packages/tasks/src/modelLibraries.ts b/packages/tasks/src/modelLibraries.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6d76980f515c42d5f104289c97cab1535ad4419b
--- /dev/null
+++ b/packages/tasks/src/modelLibraries.ts
@@ -0,0 +1,43 @@
+/**
+ * Add your new library here.
+ *
+ * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
+ * File formats live in an enum inside the internal codebase.
+ */
+export enum ModelLibrary {
+	"adapter-transformers" = "Adapter Transformers",
+	"allennlp" = "allenNLP",
+	"asteroid" = "Asteroid",
+	"bertopic" = "BERTopic",
+	"diffusers" = "Diffusers",
+	"doctr" = "docTR",
+	"espnet" = "ESPnet",
+	"fairseq" = "Fairseq",
+	"flair" = "Flair",
+	"keras" = "Keras",
+	"k2" = "K2",
+	"nemo" = "NeMo",
+	"open_clip" = "OpenCLIP",
+	"paddlenlp" = "PaddleNLP",
+	"peft" = "PEFT",
+	"pyannote-audio" = "pyannote.audio",
+	"sample-factory" = "Sample Factory",
+	"sentence-transformers" = "Sentence Transformers",
+	"sklearn" = "Scikit-learn",
+	"spacy" = "spaCy",
+	"span-marker" = "SpanMarker",
+	"speechbrain" = "speechbrain",
+	"tensorflowtts" = "TensorFlowTTS",
+	"timm" = "Timm",
+	"fastai" = "fastai",
+	"transformers" = "Transformers",
+	"transformers.js" = "Transformers.js",
+	"stanza" = "Stanza",
+	"fasttext" = "fastText",
+	"stable-baselines3" = "Stable-Baselines3",
+	"ml-agents" = "ML-Agents",
+	"pythae" = "Pythae",
+	"mindspore" = "MindSpore",
+}
+
+export type ModelLibraryKey = keyof typeof ModelLibrary;
diff --git a/packages/tasks/src/object-detection/about.md b/packages/tasks/src/object-detection/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..4dda21224f937a27a5f56d40ee877ff03eaf1d09
--- /dev/null
+++ b/packages/tasks/src/object-detection/about.md
@@ -0,0 +1,37 @@
+## Use Cases
+
+### Autonomous Driving
+
+Object Detection is widely used in computer vision for autonomous driving. Self-driving cars use Object Detection models to detect pedestrians, bicycles, traffic lights and road signs to decide what action to take.
+
+### Object Tracking in Matches
+
+Object Detection models are widely used in sports where the ball or a player is tracked for monitoring and refereeing during matches.
+
+### Image Search
+
+Object Detection models are widely used in image search. Smartphones use Object Detection models to detect entities (such as specific places or objects) and allow the user to search for the entity on the Internet.
+
+### Object Counting
+
+Object Detection models are used to count instances of objects in a given image. This can include counting objects in warehouses or stores, or the number of visitors in a store. They are also used to manage crowds at events to prevent disasters.
+
+## Inference
+
+You can infer with Object Detection models through the `object-detection` pipeline. When calling the pipeline you just need to specify a path or an HTTP link to an image; the output shown below is illustrative.
+
+```python
+from transformers import pipeline
+
+model = pipeline("object-detection")
+
+model("path_to_cat_image")
+
+# [{'label': 'blanket',
+#   'score': 0.917,
+#   'box': {'xmin': 54, 'ymin': 28, 'xmax': 420, 'ymax': 372}},
+# ...]
+```
+
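+Building on the pipeline output above, the Object Counting use case reduces to filtering and counting the predictions. A minimal sketch (the image path, label and threshold are illustrative):
+
+```python
+preds = model("path_to_warehouse_image")
+
+# count the detections of one class above a confidence threshold
+num_people = sum(1 for pred in preds if pred["label"] == "person" and pred["score"] > 0.9)
+print(f"Detected {num_people} people")
+```
+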
+## Useful Resources
+
+- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8)
+- [Object detection task guide](https://huggingface.co/docs/transformers/tasks/object_detection)
diff --git a/packages/tasks/src/object-detection/data.ts b/packages/tasks/src/object-detection/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8ffe8ea1b070c7cd89427197093c3e9a18cefbee
--- /dev/null
+++ b/packages/tasks/src/object-detection/data.ts
@@ -0,0 +1,76 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "Widely used benchmark dataset for multiple Vision tasks.",
+			id: "merve/coco2017",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "object-detection-input.jpg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "object-detection-output.jpg",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately",
+			id: "Average Precision",
+		},
+		{
+			description: "The Mean Average Precision (mAP) metric is the overall average of the AP values",
+			id: "Mean Average Precision",
+		},
+		{
+			description:
+				"The APα metric is the Average Precision at the IoU threshold of a α value, for example, AP50 and AP75",
+			id: "APα",
+		},
+	],
+	models: [
+		{
+			// TODO: write description
+			description: "Solid object detection model trained on the benchmark dataset COCO 2017.",
+			id: "facebook/detr-resnet-50",
+		},
+		{
+			description: "A strong object detection model trained on the COCO 2017 dataset.",
+			id: "facebook/detr-resnet-101",
+		},
+	],
+	spaces: [
+		{
+			description: "An object detection application that can detect unseen objects out of the box.",
+			id: "adirik/OWL-ViT",
+		},
+		{
+			description: "An application that contains various object detection models to try from.",
+			id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS",
+		},
+		{
+			description: "An application that shows multiple cutting edge techniques for object detection and tracking.",
+			id: "kadirnar/torchyolo",
+		},
+		{
+			description: "An object tracking, segmentation and inpainting application.",
+			id: "VIPLab/Track-Anything",
+		},
+	],
+	summary:
+		"Object Detection models allow users to identify objects of certain defined classes. Object detection models receive an image as input and output the image with bounding boxes and labels on the detected objects.",
+	widgetModels: ["facebook/detr-resnet-50"],
+	youtubeId: "WdAeKSOpxhw",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/pipelines.ts b/packages/tasks/src/pipelines.ts
new file mode 100644
index 0000000000000000000000000000000000000000..ae487d5e30bb88aa4119953e7230ba2ebb31e8c0
--- /dev/null
+++ b/packages/tasks/src/pipelines.ts
@@ -0,0 +1,619 @@
+export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;
+
+export type Modality = (typeof MODALITIES)[number];
+
+export const MODALITY_LABELS = {
+	multimodal: "Multimodal",
+	nlp: "Natural Language Processing",
+	audio: "Audio",
+	cv: "Computer Vision",
+	rl: "Reinforcement Learning",
+	tabular: "Tabular",
+	other: "Other",
+} satisfies Record<Modality, string>;
+
+/**
+ * Public interface for a sub task.
+ *
+ * This can be used in a model card's `model-index` metadata
+ * and is a more granular classification that can grow significantly
+ * over time as new tasks are added.
+ */
+export interface SubTask {
+	/**
+	 * type of the task (e.g. audio-source-separation)
+	 */
+	type: string;
+	/**
+	 * displayed name of the task (e.g. Audio Source Separation)
+	 */
+	name: string;
+}
+
+/**
+ * Public interface for a PipelineData.
+ *
+ * This information corresponds to a pipeline type (aka task)
+ * in the Hub.
+ */
+export interface PipelineData {
+	/**
+	 * displayed name of the task (e.g. Text Classification)
+	 */
+	name: string;
+	subtasks?: SubTask[];
+	modality: Modality;
+	/**
+	 * color for the tag icon.
+	 */
+	color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow";
+	/**
+	 * whether to hide in /models filters
+	 */
+	hideInModels?: boolean;
+	/**
+	 * whether to hide in /datasets filters
+	 */
+	hideInDatasets?: boolean;
+}
+
+/// Coarse-grained taxonomy of tasks
+///
+/// This type is used in multiple places in the Hugging Face
+/// ecosystem:
+///  - To determine which widget to show.
+///  - To determine which endpoint of Inference API to use.
+///  - As filters at the left of models and datasets page.
+///
+/// Note that this is sensitive to order.
+/// For each domain, the order should be of decreasing specificity.
+/// This will impact the default pipeline tag of a model when not
+/// specified.
+export const PIPELINE_DATA = {
+	"text-classification": {
+		name: "Text Classification",
+		subtasks: [
+			{
+				type: "acceptability-classification",
+				name: "Acceptability Classification",
+			},
+			{
+				type: "entity-linking-classification",
+				name: "Entity Linking Classification",
+			},
+			{
+				type: "fact-checking",
+				name: "Fact Checking",
+			},
+			{
+				type: "intent-classification",
+				name: "Intent Classification",
+			},
+			{
+				type: "language-identification",
+				name: "Language Identification",
+			},
+			{
+				type: "multi-class-classification",
+				name: "Multi Class Classification",
+			},
+			{
+				type: "multi-label-classification",
+				name: "Multi Label Classification",
+			},
+			{
+				type: "multi-input-text-classification",
+				name: "Multi-input Text Classification",
+			},
+			{
+				type: "natural-language-inference",
+				name: "Natural Language Inference",
+			},
+			{
+				type: "semantic-similarity-classification",
+				name: "Semantic Similarity Classification",
+			},
+			{
+				type: "sentiment-classification",
+				name: "Sentiment Classification",
+			},
+			{
+				type: "topic-classification",
+				name: "Topic Classification",
+			},
+			{
+				type: "semantic-similarity-scoring",
+				name: "Semantic Similarity Scoring",
+			},
+			{
+				type: "sentiment-scoring",
+				name: "Sentiment Scoring",
+			},
+			{
+				type: "sentiment-analysis",
+				name: "Sentiment Analysis",
+			},
+			{
+				type: "hate-speech-detection",
+				name: "Hate Speech Detection",
+			},
+			{
+				type: "text-scoring",
+				name: "Text Scoring",
+			},
+		],
+		modality: "nlp",
+		color: "orange",
+	},
+	"token-classification": {
+		name: "Token Classification",
+		subtasks: [
+			{
+				type: "named-entity-recognition",
+				name: "Named Entity Recognition",
+			},
+			{
+				type: "part-of-speech",
+				name: "Part of Speech",
+			},
+			{
+				type: "parsing",
+				name: "Parsing",
+			},
+			{
+				type: "lemmatization",
+				name: "Lemmatization",
+			},
+			{
+				type: "word-sense-disambiguation",
+				name: "Word Sense Disambiguation",
+			},
+			{
+				type: "coreference-resolution",
+				name: "Coreference Resolution",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+	},
+	"table-question-answering": {
+		name: "Table Question Answering",
+		modality: "nlp",
+		color: "green",
+	},
+	"question-answering": {
+		name: "Question Answering",
+		subtasks: [
+			{
+				type: "extractive-qa",
+				name: "Extractive QA",
+			},
+			{
+				type: "open-domain-qa",
+				name: "Open Domain QA",
+			},
+			{
+				type: "closed-domain-qa",
+				name: "Closed Domain QA",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+	},
+	"zero-shot-classification": {
+		name: "Zero-Shot Classification",
+		modality: "nlp",
+		color: "yellow",
+	},
+	translation: {
+		name: "Translation",
+		modality: "nlp",
+		color: "green",
+	},
+	summarization: {
+		name: "Summarization",
+		subtasks: [
+			{
+				type: "news-articles-summarization",
+				name: "News Articles Summarization",
+			},
+			{
+				type: "news-articles-headline-generation",
+				name: "News Articles Headline Generation",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	conversational: {
+		name: "Conversational",
+		subtasks: [
+			{
+				type: "dialogue-generation",
+				name: "Dialogue Generation",
+			},
+		],
+		modality: "nlp",
+		color: "green",
+	},
+	"feature-extraction": {
+		name: "Feature Extraction",
+		modality: "multimodal",
+		color: "red",
+	},
+	"text-generation": {
+		name: "Text Generation",
+		subtasks: [
+			{
+				type: "dialogue-modeling",
+				name: "Dialogue Modeling",
+			},
+			{
+				type: "language-modeling",
+				name: "Language Modeling",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	"text2text-generation": {
+		name: "Text2Text Generation",
+		subtasks: [
+			{
+				type: "text-simplification",
+				name: "Text simplification",
+			},
+			{
+				type: "explanation-generation",
+				name: "Explanation Generation",
+			},
+			{
+				type: "abstractive-qa",
+				name: "Abstractive QA",
+			},
+			{
+				type: "open-domain-abstractive-qa",
+				name: "Open Domain Abstractive QA",
+			},
+			{
+				type: "closed-domain-qa",
+				name: "Closed Domain QA",
+			},
+			{
+				type: "open-book-qa",
+				name: "Open Book QA",
+			},
+			{
+				type: "closed-book-qa",
+				name: "Closed Book QA",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	"fill-mask": {
+		name: "Fill-Mask",
+		subtasks: [
+			{
+				type: "slot-filling",
+				name: "Slot Filling",
+			},
+			{
+				type: "masked-language-modeling",
+				name: "Masked Language Modeling",
+			},
+		],
+		modality: "nlp",
+		color: "red",
+	},
+	"sentence-similarity": {
+		name: "Sentence Similarity",
+		modality: "nlp",
+		color: "yellow",
+	},
+	"text-to-speech": {
+		name: "Text-to-Speech",
+		modality: "audio",
+		color: "yellow",
+	},
+	"text-to-audio": {
+		name: "Text-to-Audio",
+		modality: "audio",
+		color: "yellow",
+	},
+	"automatic-speech-recognition": {
+		name: "Automatic Speech Recognition",
+		modality: "audio",
+		color: "yellow",
+	},
+	"audio-to-audio": {
+		name: "Audio-to-Audio",
+		modality: "audio",
+		color: "blue",
+	},
+	"audio-classification": {
+		name: "Audio Classification",
+		subtasks: [
+			{
+				type: "keyword-spotting",
+				name: "Keyword Spotting",
+			},
+			{
+				type: "speaker-identification",
+				name: "Speaker Identification",
+			},
+			{
+				type: "audio-intent-classification",
+				name: "Audio Intent Classification",
+			},
+			{
+				type: "audio-emotion-recognition",
+				name: "Audio Emotion Recognition",
+			},
+			{
+				type: "audio-language-identification",
+				name: "Audio Language Identification",
+			},
+		],
+		modality: "audio",
+		color: "green",
+	},
+	"voice-activity-detection": {
+		name: "Voice Activity Detection",
+		modality: "audio",
+		color: "red",
+	},
+	"depth-estimation": {
+		name: "Depth Estimation",
+		modality: "cv",
+		color: "yellow",
+	},
+	"image-classification": {
+		name: "Image Classification",
+		subtasks: [
+			{
+				type: "multi-label-image-classification",
+				name: "Multi Label Image Classification",
+			},
+			{
+				type: "multi-class-image-classification",
+				name: "Multi Class Image Classification",
+			},
+		],
+		modality: "cv",
+		color: "blue",
+	},
+	"object-detection": {
+		name: "Object Detection",
+		subtasks: [
+			{
+				type: "face-detection",
+				name: "Face Detection",
+			},
+			{
+				type: "vehicle-detection",
+				name: "Vehicle Detection",
+			},
+		],
+		modality: "cv",
+		color: "yellow",
+	},
+	"image-segmentation": {
+		name: "Image Segmentation",
+		subtasks: [
+			{
+				type: "instance-segmentation",
+				name: "Instance Segmentation",
+			},
+			{
+				type: "semantic-segmentation",
+				name: "Semantic Segmentation",
+			},
+			{
+				type: "panoptic-segmentation",
+				name: "Panoptic Segmentation",
+			},
+		],
+		modality: "cv",
+		color: "green",
+	},
+	"text-to-image": {
+		name: "Text-to-Image",
+		modality: "multimodal",
+		color: "yellow",
+	},
+	"image-to-text": {
+		name: "Image-to-Text",
+		subtasks: [
+			{
+				type: "image-captioning",
+				name: "Image Captioning",
+			},
+		],
+		modality: "multimodal",
+		color: "red",
+	},
+	"image-to-image": {
+		name: "Image-to-Image",
+		modality: "cv",
+		color: "indigo",
+	},
+	"unconditional-image-generation": {
+		name: "Unconditional Image Generation",
+		modality: "cv",
+		color: "green",
+	},
+	"video-classification": {
+		name: "Video Classification",
+		modality: "cv",
+		color: "blue",
+	},
+	"reinforcement-learning": {
+		name: "Reinforcement Learning",
+		modality: "rl",
+		color: "red",
+	},
+	robotics: {
+		name: "Robotics",
+		modality: "rl",
+		subtasks: [
+			{
+				type: "grasping",
+				name: "Grasping",
+			},
+			{
+				type: "task-planning",
+				name: "Task Planning",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-classification": {
+		name: "Tabular Classification",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "tabular-multi-class-classification",
+				name: "Tabular Multi Class Classification",
+			},
+			{
+				type: "tabular-multi-label-classification",
+				name: "Tabular Multi Label Classification",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-regression": {
+		name: "Tabular Regression",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "tabular-single-column-regression",
+				name: "Tabular Single Column Regression",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-to-text": {
+		name: "Tabular to Text",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "rdf-to-text",
+				name: "RDF to text",
+			},
+		],
+		color: "blue",
+		hideInModels: true,
+	},
+	"table-to-text": {
+		name: "Table to Text",
+		modality: "nlp",
+		color: "blue",
+		hideInModels: true,
+	},
+	"multiple-choice": {
+		name: "Multiple Choice",
+		subtasks: [
+			{
+				type: "multiple-choice-qa",
+				name: "Multiple Choice QA",
+			},
+			{
+				type: "multiple-choice-coreference-resolution",
+				name: "Multiple Choice Coreference Resolution",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+		hideInModels: true,
+	},
+	"text-retrieval": {
+		name: "Text Retrieval",
+		subtasks: [
+			{
+				type: "document-retrieval",
+				name: "Document Retrieval",
+			},
+			{
+				type: "utterance-retrieval",
+				name: "Utterance Retrieval",
+			},
+			{
+				type: "entity-linking-retrieval",
+				name: "Entity Linking Retrieval",
+			},
+			{
+				type: "fact-checking-retrieval",
+				name: "Fact Checking Retrieval",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+		hideInModels: true,
+	},
+	"time-series-forecasting": {
+		name: "Time Series Forecasting",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "univariate-time-series-forecasting",
+				name: "Univariate Time Series Forecasting",
+			},
+			{
+				type: "multivariate-time-series-forecasting",
+				name: "Multivariate Time Series Forecasting",
+			},
+		],
+		color: "blue",
+		hideInModels: true,
+	},
+	"text-to-video": {
+		name: "Text-to-Video",
+		modality: "multimodal",
+		color: "green",
+	},
+	"visual-question-answering": {
+		name: "Visual Question Answering",
+		subtasks: [
+			{
+				type: "visual-question-answering",
+				name: "Visual Question Answering",
+			},
+		],
+		modality: "multimodal",
+		color: "red",
+	},
+	"document-question-answering": {
+		name: "Document Question Answering",
+		subtasks: [
+			{
+				type: "document-question-answering",
+				name: "Document Question Answering",
+			},
+		],
+		modality: "multimodal",
+		color: "blue",
+		hideInDatasets: true,
+	},
+	"zero-shot-image-classification": {
+		name: "Zero-Shot Image Classification",
+		modality: "cv",
+		color: "yellow",
+	},
+	"graph-ml": {
+		name: "Graph Machine Learning",
+		modality: "multimodal",
+		color: "green",
+	},
+	other: {
+		name: "Other",
+		modality: "other",
+		color: "blue",
+		hideInModels: true,
+		hideInDatasets: true,
+	},
+} satisfies Record<string, PipelineData>;
+
+export type PipelineType = keyof typeof PIPELINE_DATA;
+export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[];
diff --git a/packages/tasks/src/placeholder/about.md b/packages/tasks/src/placeholder/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..fdb45584410dcd07e530607d469140038ede6b25
--- /dev/null
+++ b/packages/tasks/src/placeholder/about.md
@@ -0,0 +1,15 @@
+## Use Cases
+
+You can contribute to this area with common use cases of the task!
+
+## Task Variants
+
+This section can be filled with variants of this task, if there are any.
+
+## Inference
+
+This section should have useful information about how to pull a model from the Hugging Face Hub, using a library specialized in the task, and how to use it.
+
+## Useful Resources
+
+In this area, you can insert useful resources about how to train or use a model for this task.
diff --git a/packages/tasks/src/placeholder/data.ts b/packages/tasks/src/placeholder/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3660b52194e88b0ae1391b34b970d6b6c1e27cc4
--- /dev/null
+++ b/packages/tasks/src/placeholder/data.ts
@@ -0,0 +1,18 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [],
+	demo: {
+		inputs: [],
+		outputs: [],
+	},
+	isPlaceholder: true,
+	metrics: [],
+	models: [],
+	spaces: [],
+	summary: "",
+	widgetModels: [],
+	youtubeId: undefined,
+};
+
+export default taskData;
diff --git a/packages/tasks/src/question-answering/about.md b/packages/tasks/src/question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..d5934ee80c7ca32c53726ef5b9e715af54a0b5d6
--- /dev/null
+++ b/packages/tasks/src/question-answering/about.md
@@ -0,0 +1,56 @@
+## Use Cases
+
+### Frequently Asked Questions
+
+You can use Question Answering (QA) models to automate the response to frequently asked questions by using a knowledge base (documents) as context. Answers to customer questions can be drawn from those documents.
+
+⚡⚡ If you’d like to save inference time, you can first use [passage ranking models](/tasks/sentence-similarity) to see which document might contain the answer to the question and iterate over that document with the QA model instead.
+
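+A minimal sketch of this rank-then-read pattern, assuming the `sentence-transformers` library (the ranking model and documents below are illustrative):
+
+```python
+from sentence_transformers import SentenceTransformer, util
+from transformers import pipeline
+
+question = "Where do I live?"
+documents = [
+	"My name is Merve and I live in İstanbul.",
+	"The weather is great today.",
+]
+
+# rank the documents by semantic similarity to the question
+ranker = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+scores = util.cos_sim(ranker.encode(question), ranker.encode(documents))[0]
+best_doc = documents[int(scores.argmax())]
+
+# run the QA model only on the most relevant document
+qa_model = pipeline("question-answering")
+print(qa_model(question=question, context=best_doc))
+```
+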
+## Task Variants
+
+There are different QA variants based on the inputs and outputs:
+
+- **Extractive QA:** The model **extracts** the answer from a context. The context here could be a provided text, a table or even HTML! This is usually solved with BERT-like models.
+- **Open Generative QA:** The model **generates** free text directly based on the context. You can learn more about the Text Generation task in [its page](/tasks/text-generation).
+- **Closed Generative QA:** In this case, no context is provided. The answer is completely generated by a model.
+
+The schema above illustrates extractive, open book QA. The model takes a context and the question and extracts the answer from the given context.
+
+You can also differentiate QA models depending on whether they are open-domain or closed-domain. Open-domain models are not restricted to a specific domain, while closed-domain models are restricted to a specific domain (e.g. legal, medical documents).
+
+## Inference
+
+You can infer with QA models using the 🤗 Transformers library's `question-answering` pipeline. If no model checkpoint is given, the pipeline will be initialized with `distilbert-base-cased-distilled-squad`. This pipeline takes a question and a context from which the answer will be extracted and returned.
+
+```python
+from transformers import pipeline
+
+qa_model = pipeline("question-answering")
+question = "Where do I live?"
+context = "My name is Merve and I live in İstanbul."
+qa_model(question=question, context=context)
+## {'answer': 'İstanbul', 'end': 39, 'score': 0.953, 'start': 31}
+```
+
+## Useful Resources
+
+Would you like to learn more about QA? Awesome! Here are some curated resources that you may find helpful!
+
+- [Course Chapter on Question Answering](https://huggingface.co/course/chapter7/7?fw=pt)
+- [Question Answering Workshop](https://www.youtube.com/watch?v=Ihgk8kGLpIE&ab_channel=HuggingFace)
+- [How to Build an Open-Domain Question Answering System?](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html)
+- [Blog Post: ELI5 A Model for Open Domain Long Form Question Answering](https://yjernite.github.io/lfqa.html)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/question_answering.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering)
+
+### Documentation
+
+- [Question answering task guide](https://huggingface.co/docs/transformers/tasks/question_answering)
diff --git a/packages/tasks/src/question-answering/data.ts b/packages/tasks/src/question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..dee5ccf644cb53a148a6a6c0bc4bb398b3bf6c27
--- /dev/null
+++ b/packages/tasks/src/question-answering/data.ts
@@ -0,0 +1,71 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "A famous question answering dataset based on English articles from Wikipedia.",
+			id: "squad_v2",
+		},
+		{
+			// TODO write proper description
+			description: "A dataset of aggregated anonymized actual queries issued to the Google search engine.",
+			id: "natural_questions",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Question",
+				content: "Which name is also used to describe the Amazon rainforest in English?",
+				type: "text",
+			},
+			{
+				label: "Context",
+				content: "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Answer",
+				content: "Amazonia",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0",
+			id: "exact-match",
+		},
+		{
+			description:
+				"The F1-Score metric is useful if we value both false positives and false negatives equally. The F1-Score is calculated on each word in the predicted sequence against the correct answer",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "A robust baseline model for most question answering domains.",
+			id: "deepset/roberta-base-squad2",
+		},
+		{
+			description: "A special model that can answer questions from tables!",
+			id: "google/tapas-base-finetuned-wtq",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can answer a long question from Wikipedia.",
+			id: "deepset/wikipedia-assistant",
+		},
+	],
+	summary:
+		"Question Answering models can retrieve the answer to a question from a given text, which is useful for searching for an answer in a document. Some question answering models can generate answers without context!",
+	widgetModels: ["deepset/roberta-base-squad2"],
+	youtubeId: "ajPx5LwJD-I",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/reinforcement-learning/about.md b/packages/tasks/src/reinforcement-learning/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..13f79cfff65cea30b22a4c667b8f83964f7c00f5
--- /dev/null
+++ b/packages/tasks/src/reinforcement-learning/about.md
@@ -0,0 +1,167 @@
+## Use Cases
+
+### Gaming
+
+Reinforcement learning is known for its application to video games. Games provide a safe environment in which to train an agent, in the sense that they are perfectly defined and controllable. This makes them ideal candidates for experimentation and for learning about the capabilities and limitations of various RL algorithms.
+
+There are many videos on the Internet where a game-playing reinforcement learning agent starts with a terrible gaming strategy due to the random initialization of its settings, but gets better and better with each episode of training. This [paper](https://arxiv.org/abs/1912.10944) mainly investigates the performance of RL in popular games such as Minecraft and Dota2. The agent's performance can exceed a human player's, although there are still challenges, mainly related to the efficiency of constructing the agent's gaming policy.
+
+### Trading and Finance
+
+Reinforcement learning is the science of training computers to make decisions, and it has novel uses in trading and finance. Time-series models are helpful in predicting the prices, volume and future sales of a product or a stock. Reinforcement-learning-based automated agents can decide whether to sell, buy or hold a stock, shifting the impact of AI in this field from price prediction to real-time decision making. The glossary below clarifies some of the terms involved in training a model to make these decisions.
+
+## Task Variants
+
+### Model Based RL
+
+Model-based reinforcement learning techniques intend to create a model of the environment, learn the state transition probabilities and the reward function, and use these to find the optimal action. Some typical examples of model-based reinforcement learning algorithms are dynamic programming, value iteration and policy iteration.
+
+### Model Free RL
+
+In model-free reinforcement learning, the agent decides on optimal actions based on its experience in the environment and the reward it collects from it. This is one of the most commonly used families of algorithms and is beneficial in complex environments, where modeling state transition probabilities and reward functions is difficult. Some examples of model-free reinforcement learning algorithms are SARSA, Q-Learning, actor-critic and proximal policy optimization (PPO).
+
+## Glossary
+
+<!-- ![RL Loop](https://huggingface.co/blog/assets/63_deep_rl_intro/RL_process.jpg "Agent Environment Interaction") TODO: Uncomment image for visual understanding if it fits within the page-->
+
+**Agent:** The learner and the decision maker.
+
+**Environment:** The part of the world the agent interacts with, comprising everything outside the agent.
+
+Observations and states are the information our agent gets from the environment. In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock.
+
+**State:** Complete description of the state of the environment with no hidden information.
+
+**Observation:** Partial description of the state, in a partially observed environment.
+
+**Action:** The decision taken by the agent.
+
+**Reward:** The numerical feedback signal that the agent receives from the environment based on the chosen action.
+
+**Return:** Cumulative Reward. In the simplest case, the return is the sum of the rewards.
+
+**Episode:** For some applications there is a natural notion of final time step. In this case, there is a starting point and an ending point (a terminal state). This creates an episode: a list of States, Actions, Rewards, and new States. For instance, think about Chess: an episode begins at the initial board position and ends when the game is over.
+
+**Policy:** The Policy is the brain of the Agent; it's the function that tells us what action to take given the state. It defines the agent's behavior at a given time. Reinforcement learning methods specify how the agent's policy is changed as a result of its experience.
+
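+As a small illustration of the Return definition above, a common generalization is the discounted return, where future rewards are down-weighted by a factor gamma (the reward values and gamma below are illustrative):
+
+```python
+# discounted return: G = r_0 + gamma * r_1 + gamma^2 * r_2 + ...
+def discounted_return(rewards, gamma=0.99):
+	return sum(gamma**t * r for t, r in enumerate(rewards))
+
+print(discounted_return([1.0, 1.0, 1.0]))  # ≈ 2.97
+```
+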
+## Inference
+
+Inference in reinforcement learning differs from other modalities, in which there's a model and test data. In reinforcement learning, once you have trained an agent in an environment, you run the trained agent for additional episodes to measure its average reward.
+
+A typical training cycle consists of gathering experience from the environment, training the agent, and running the agent on a test environment to obtain the average reward. Below is a snippet showing how you can interact with the environment using the `gymnasium` library, train an agent using `stable-baselines3`, evaluate the agent on a test environment, and infer actions from the trained agent.
+
+```python
+# Here we run the CartPole-v1 environment for 20 time steps, taking random actions
+import gymnasium as gym
+
+env = gym.make("CartPole-v1")
+observation, info = env.reset()
+
+for _ in range(20):
+    # sample a random action from the action space
+    action = env.action_space.sample()
+
+    # the agent takes the action
+    observation, reward, terminated, truncated, info = env.step(action)
+
+    # if the agent reaches a terminal state, we reset the environment
+    if terminated or truncated:
+        print("Environment is reset")
+        observation, info = env.reset()
+
+env.close()
+```
+
+The snippet below shows how to train a PPO model on the LunarLander-v2 environment using the `stable-baselines3` library and save the model.
+
+```python
+import gymnasium as gym
+from stable_baselines3 import PPO
+
+# initialize the environment
+env = gym.make("LunarLander-v2")
+
+# initialize the model
+model = PPO(policy="MlpPolicy", env=env, n_steps=1024, batch_size=64, n_epochs=4, verbose=1)
+
+# train the model for 1000 time steps
+model.learn(total_timesteps=1000)
+
+# save the model to the desired directory
+model_name = "PPO-LunarLander-v2"
+model.save(model_name)
+```
+
+The code below shows how to evaluate an agent trained using `stable-baselines3`.
+
+```python
+# Load a saved model and evaluate it for 10 episodes
+import gymnasium as gym
+from stable_baselines3 import PPO
+from stable_baselines3.common.evaluation import evaluate_policy
+
+# initialize the evaluation environment
+eval_env = gym.make("LunarLander-v2")
+
+# load the saved model
+model = PPO.load("PPO-LunarLander-v2", env=eval_env)
+
+# run the trained agent on eval_env for 10 episodes and get the mean reward
+mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
+
+print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
+```
+
+The code snippet below shows how to infer actions from an agent trained using `stable-baselines3`.
+
+```python
+import gymnasium as gym
+from stable_baselines3 import PPO
+
+# initialize the environment and load the saved model into it
+env = gym.make("LunarLander-v2")
+model = PPO.load("PPO-LunarLander-v2", env=env)
+
+# get the (vectorized) environment from the trained agent
+env = model.get_env()
+
+obs = env.reset()
+for i in range(1000):
+    # get action predictions from the trained agent
+    action, _states = model.predict(obs, deterministic=True)
+
+    # take the predicted action in the environment to observe the next state and rewards
+    obs, rewards, dones, info = env.step(action)
+```
+
+For more information, you can check out the documentation of the respective libraries:
+
+- [Gymnasium Documentation](https://gymnasium.farama.org/)
+- [Stable Baselines Documentation](https://stable-baselines3.readthedocs.io/en/master/)
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+- [HuggingFace Deep Reinforcement Learning Class](https://github.com/huggingface/deep-rl-class)
+- [Introduction to Deep Reinforcement Learning](https://huggingface.co/blog/deep-rl-intro)
+- [Stable Baselines Integration with HuggingFace](https://huggingface.co/blog/sb3)
+- Learn how reinforcement learning is used in conversational agents in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf)
+- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM)
+- [Guide on Multi-Agent Competition Systems](https://huggingface.co/blog/aivsai)
+
+### Notebooks
+
+- [Train a Deep Reinforcement Learning lander agent to land correctly on the Moon 🌕 using Stable-Baselines3](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit1/unit1.ipynb)
+- [Introduction to Unity MLAgents](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit5/unit5.ipynb)
+- [Training Decision Transformers with 🤗 transformers](https://github.com/huggingface/blog/blob/main/notebooks/101_train-decision-transformers.ipynb)
+
+This page was made possible thanks to the efforts of [Ram Ananth](https://huggingface.co/RamAnanth1), [Emilio Lehoucq](https://huggingface.co/emiliol), [Sagar Mathpal](https://huggingface.co/sagarmathpal) and [Osman Alenbey](https://huggingface.co/osman93).
diff --git a/packages/tasks/src/reinforcement-learning/data.ts b/packages/tasks/src/reinforcement-learning/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..78731ec20366f3551e0366e3001c7b708c909487
--- /dev/null
+++ b/packages/tasks/src/reinforcement-learning/data.ts
@@ -0,0 +1,75 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A curation of widely used datasets for Data Driven Deep Reinforcement Learning (D4RL)",
+			id: "edbeeching/decision_transformer_gym_replay",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "State",
+				content: "Red traffic light, pedestrians are about to pass.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Action",
+				content: "Stop the car.",
+				type: "text",
+			},
+			{
+				label: "Next State",
+				content: "Yellow light, pedestrians have crossed.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Accumulated reward across all time steps discounted by a factor that ranges between 0 and 1 and determines how much the agent optimizes for future relative to immediate rewards. Measures how good is the policy ultimately found by a given algorithm considering uncertainty over the future.",
+			id: "Discounted Total Reward",
+		},
+		{
+			description:
+				"Average return obtained after running the policy for a certain number of evaluation episodes. As opposed to total reward, mean reward considers how much reward a given algorithm receives while learning.",
+			id: "Mean Reward",
+		},
+		{
+			description:
+				"Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps. However, an agent that reaches an acceptable level of optimality after a given time horizon may be preferable to one that ultimately reaches optimality but takes a long time.",
+			id: "Level of Performance After Some Time",
+		},
+	],
+	models: [
+		{
+			description: "A Reinforcement Learning model trained on expert data from the Gym Hopper environment",
+
+			id: "edbeeching/decision-transformer-gym-hopper-expert",
+		},
+		{
+			description: "A PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and the RL Zoo.",
+			id: "HumanCompatibleAI/ppo-seals-CartPole-v0",
+		},
+	],
+	spaces: [
+		{
+			description: "An application for a cute puppy agent learning to catch a stick.",
+			id: "ThomasSimonini/Huggy",
+		},
+		{
+			description: "An application to play Snowball Fight with a reinforcement learning agent.",
+			id: "ThomasSimonini/SnowballFight",
+		},
+	],
+	summary:
+		"Reinforcement learning is the computational approach of learning from action by interacting with an environment through trial and error and receiving rewards (negative or positive) as feedback",
+	widgetModels: [],
+	youtubeId: "q0BiUn5LiBc",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/sentence-similarity/about.md b/packages/tasks/src/sentence-similarity/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee536235d610cd144c1681ef4618c47ecce2a15c
--- /dev/null
+++ b/packages/tasks/src/sentence-similarity/about.md
@@ -0,0 +1,97 @@
+## Use Cases 🔍
+
+### Information Retrieval
+
+You can extract information from documents using Sentence Similarity models. The first step is to rank documents using Passage Ranking models. You can then take the top-ranked document and search within it with Sentence Similarity models by selecting the sentence that is most similar to the input query.
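+
+As a rough sketch of this second step, the Sentence Transformers library (introduced below) can embed a query and candidate sentences and pick the closest match; the model choice and sentences here are only illustrative.
+
+```python
+from sentence_transformers import SentenceTransformer, util
+
+model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+
+query = "How many people live in London?"
+sentences = [
+    "Around 9 million people live in London.",
+    "London is known for its financial district.",
+]
+
+query_embedding = model.encode(query, convert_to_tensor=True)
+sentence_embeddings = model.encode(sentences, convert_to_tensor=True)
+
+# retrieve the sentence most similar to the query
+hits = util.semantic_search(query_embedding, sentence_embeddings, top_k=1)
+print(sentences[hits[0][0]["corpus_id"]])
+```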
+
+## The Sentence Transformers library
+
+The [Sentence Transformers](https://www.sbert.net/) library is very powerful for calculating embeddings of sentences, paragraphs, and entire documents. An embedding is just a vector representation of a text and is useful for finding how similar two texts are.
+
+You can find and use [hundreds of Sentence Transformers](https://huggingface.co/models?library=sentence-transformers&sort=downloads) models from the Hub by directly using the library, playing with the widgets in the browser or using the Inference API.
+
+## Task Variants
+
+### Passage Ranking
+
+Passage Ranking is the task of ranking documents based on their relevance to a given query. The task is evaluated on Mean Reciprocal Rank. These models take one query and multiple documents and return ranked documents according to the relevancy to the query. 📄
+
+You can infer with Passage Ranking models using the [Inference API](https://huggingface.co/inference-api). The inputs to a Passage Ranking model are a query, for which we look for relevancy in the documents, and the documents we want to search. The model will return scores according to the relevancy of these documents to the query.
+
+```python
+import json
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
+headers = {"Authorization": f"Bearer {api_token}"}
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+data = query(
+    {
+        "inputs": {
+            "source_sentence": "That is a happy person",
+            "sentences": [
+                "That is a happy dog",
+                "That is a very happy person",
+                "Today is a sunny day"
+            ]
+        }
+    })
+## [0.853, 0.981, 0.655]
+```
+
+### Semantic Textual Similarity
+
+Semantic Textual Similarity is the task of evaluating how similar two texts are in terms of meaning. These models take a source sentence and a list of sentences in which we will look for similarities and will return a list of similarity scores. The benchmark dataset is the [Semantic Textual Similarity Benchmark](http://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark). The task is evaluated on the Pearson correlation coefficient.
+
+```python
+import json
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
+headers = {"Authorization": f"Bearer {api_token}"}
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.json()
+
+data = query(
+    {
+        "inputs": {
+            "source_sentence": "I'm very happy",
+            "sentences":["I'm filled with happiness", "I'm happy"]
+        }
+    })
+
+## [0.605, 0.894]
+```
+
+You can also infer with the models on the Hub using the Sentence Transformers library itself.
+
+```python
+# install the library first with: pip install -U sentence-transformers
+from sentence_transformers import SentenceTransformer, util
+
+sentences = ["I'm happy", "I'm full of happiness"]
+
+model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+
+# compute embeddings for both sentences
+embedding_1 = model.encode(sentences[0], convert_to_tensor=True)
+embedding_2 = model.encode(sentences[1], convert_to_tensor=True)
+
+util.pytorch_cos_sim(embedding_1, embedding_2)
+## tensor([[0.6003]])
+```
+
+## Useful Resources
+
+Would you like to learn more about Sentence Transformers and Sentence Similarity? Awesome! Here you can find some curated resources that you may find helpful!
+
+- [Sentence Transformers Documentation](https://www.sbert.net/)
+- [Sentence Transformers in the Hub](https://huggingface.co/blog/sentence-transformers-in-the-hub)
+- [Building a Playlist Generator with Sentence Transformers](https://huggingface.co/blog/playlist-generator)
+- [Getting Started With Embeddings](https://huggingface.co/blog/getting-started-with-embeddings)
diff --git a/packages/tasks/src/sentence-similarity/data.ts b/packages/tasks/src/sentence-similarity/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0f71b39468e209b3faf3690c8d0a1f827d9212c9
--- /dev/null
+++ b/packages/tasks/src/sentence-similarity/data.ts
@@ -0,0 +1,101 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Bing queries with relevant passages from various web sources.",
+			id: "ms_marco",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Source sentence",
+				content: "Machine learning is so easy.",
+				type: "text",
+			},
+			{
+				label: "Sentences to compare to",
+				content: "Deep learning is so straightforward.",
+				type: "text",
+			},
+			{
+				label: "",
+				content: "This is so difficult, like rocket science.",
+				type: "text",
+			},
+			{
+				label: "",
+				content: "I can't believe how much I struggled with this.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Deep learning is so straightforward.",
+						score: 0.623,
+					},
+					{
+						label: "This is so difficult, like rocket science.",
+						score: 0.413,
+					},
+					{
+						label: "I can't believe how much I struggled with this.",
+						score: 0.256,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Reciprocal Rank is a measure used to rank the relevancy of documents given a set of documents. Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal Rank is 1",
+			id: "Mean Reciprocal Rank",
+		},
+		{
+			description:
+				"The similarity of the embeddings is evaluated mainly on cosine similarity. It is calculated as the cosine of the angle between two vectors. It is particularly useful when your texts are not the same length",
+			id: "Cosine Similarity",
+		},
+	],
+	models: [
+		{
+			description:
+				"This model works well for sentences and paragraphs and can be used for clustering/grouping and semantic searches.",
+			id: "sentence-transformers/all-mpnet-base-v2",
+		},
+		{
+			description: "A multilingual model trained for FAQ retrieval.",
+			id: "clips/mfaq",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that leverages sentence similarity to answer questions from YouTube videos.",
+			id: "Gradio-Blocks/Ask_Questions_To_YouTube_Videos",
+		},
+		{
+			description:
+				"An application that retrieves relevant PubMed abstracts for a given online article which can be used as further references.",
+			id: "Gradio-Blocks/pubmed-abstract-retriever",
+		},
+		{
+			description: "An application that leverages sentence similarity to summarize text.",
+			id: "nickmuchi/article-text-summarizer",
+		},
+		{
+			description: "A guide that explains how Sentence Transformers can be used for semantic search.",
+			id: "sentence-transformers/Sentence_Transformers_for_semantic_search",
+		},
+	],
+	summary:
+		"Sentence Similarity is the task of determining how similar two texts are. Sentence similarity models convert input texts into vectors (embeddings) that capture semantic information and calculate how close (similar) they are between them. This task is particularly useful for information retrieval and clustering/grouping.",
+	widgetModels: ["sentence-transformers/all-MiniLM-L6-v2"],
+	youtubeId: "VCZq5AkbNEU",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/summarization/about.md b/packages/tasks/src/summarization/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec82c946f383457d311c5721ee75c96fa8047c87
--- /dev/null
+++ b/packages/tasks/src/summarization/about.md
@@ -0,0 +1,58 @@
+## Use Cases
+
+### Research Paper Summarization 🧐
+
+Research papers can be summarized to allow researchers to spend less time selecting which articles to read. There are several approaches you can take for a task like this:
+
+1. Use an existing extractive summarization model on the Hub to do inference.
+2. Pick an existing language model trained for academic papers. This model can then be trained in a process called fine-tuning so it can solve the summarization task.
+3. Use a sequence-to-sequence model like [T5](https://huggingface.co/docs/transformers/model_doc/t5) for abstractive text summarization.
+
+## Inference
+
+You can use the 🤗 Transformers library `summarization` pipeline to infer with existing Summarization models. If no model name is provided the pipeline will be initialized with [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6).
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("summarization")
+classifier("Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.")
+## [{ "summary_text": " Paris is the capital and most populous city of France..." }]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer summarization models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+const inputs =
+	"Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.";
+
+await inference.summarization({
+	model: "sshleifer/distilbart-cnn-12-6",
+	inputs,
+});
+```
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Summarization](https://huggingface.co/course/chapter7/5?fw=pt)
+- [Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker](https://huggingface.co/blog/sagemaker-distributed-training-seq2seq)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/summarization.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/summarization)
+
+### Documentation
+
+- [Summarization task guide](https://huggingface.co/docs/transformers/tasks/summarization)
diff --git a/packages/tasks/src/summarization/data.ts b/packages/tasks/src/summarization/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d0afc85282a6ec9732b9ae56487a4355ca7872c9
--- /dev/null
+++ b/packages/tasks/src/summarization/data.ts
@@ -0,0 +1,75 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.",
+			id: "mlsum",
+		},
+		{
+			description: "English conversations and their summaries. Useful for benchmarking conversational agents.",
+			id: "samsum",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content:
+					"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Output",
+				content:
+					"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.",
+			id: "rouge",
+		},
+	],
+	models: [
+		{
+			description:
+				"A strong summarization model trained on English news articles. Excels at generating factual summaries.",
+			id: "facebook/bart-large-cnn",
+		},
+		{
+			description: "A summarization model trained on medical articles.",
+			id: "google/bigbird-pegasus-large-pubmed",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can summarize long paragraphs.",
+			id: "pszemraj/summarize-long-text",
+		},
+		{
+			description: "A much needed summarization application for terms and conditions.",
+			id: "ml6team/distilbart-tos-summarizer-tosdr",
+		},
+		{
+			description: "An application that summarizes long documents.",
+			id: "pszemraj/document-summarization",
+		},
+		{
+			description: "An application that can detect errors in abstractive summarization.",
+			id: "ml6team/post-processing-summarization",
+		},
+	],
+	summary:
+		"Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.",
+	widgetModels: ["sshleifer/distilbart-cnn-12-6"],
+	youtubeId: "yHnr5Dk2zCI",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/table-question-answering/about.md b/packages/tasks/src/table-question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..684c85c22e3f196b1801ba6fcd0868b1f7ccb77b
--- /dev/null
+++ b/packages/tasks/src/table-question-answering/about.md
@@ -0,0 +1,43 @@
+## Use Cases
+
+### SQL execution
+
+You can use the Table Question Answering models to simulate SQL execution by inputting a table.
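+
+For instance, TAPEX-style models take a table and a SQL query as input text and generate the result of executing the query. The sketch below is adapted from the usage example of `microsoft/tapex-base`; the table contents are illustrative.
+
+```python
+from transformers import TapexTokenizer, BartForConditionalGeneration
+import pandas as pd
+
+tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
+model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base")
+
+data = {"year": [1896, 2008, 2012], "city": ["athens", "beijing", "london"]}
+table = pd.DataFrame.from_dict(data)
+
+# the table is linearized and fed to the model together with the SQL query
+query = "select year where city = beijing"
+encoding = tokenizer(table=table, query=query, return_tensors="pt")
+outputs = model.generate(**encoding)
+print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+## [' 2008']
+```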
+
+### Table Question Answering
+
+Table Question Answering models are capable of answering questions based on a table.
+
+## Task Variants
+
+This place can be filled with variants of this task if there are any.
+
+## Inference
+
+You can infer with TableQA models using the 🤗 Transformers library.
+
+```python
+from transformers import pipeline
+import pandas as pd
+
+# prepare table + question
+data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}
+table = pd.DataFrame.from_dict(data)
+question = "how many movies does Leonardo Di Caprio have?"
+
+# pipeline model
+# Note: you must install torch-scatter first.
+tqa = pipeline(task="table-question-answering", model="google/tapas-large-finetuned-wtq")
+
+# result
+print(tqa(table=table, query=question)["cells"][0])
+# 53
+```
+
+## Useful Resources
+
+In this area, you can insert useful resources about how to train or use a model for this task.
+
+This task page is complete thanks to the efforts of [Hao Kim Tieu](https://huggingface.co/haotieu). 🦸
diff --git a/packages/tasks/src/table-question-answering/data.ts b/packages/tasks/src/table-question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6ad9fd0afbe82d4ef0c2454c4fa29b865e62461c
--- /dev/null
+++ b/packages/tasks/src/table-question-answering/data.ts
@@ -0,0 +1,59 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.",
+			id: "wikitablequestions",
+		},
+		{
+			description:
+				"WikiSQL is a dataset of 80654 hand-annotated examples of questions and SQL queries distributed across 24241 tables from Wikipedia.",
+			id: "wikisql",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				table: [
+					["Rank", "Name", "No.of reigns", "Combined days"],
+					["1", "lou Thesz", "3", "3749"],
+					["2", "Ric Flair", "8", "3103"],
+					["3", "Harley Race", "7", "1799"],
+				],
+				type: "tabular",
+			},
+
+			{ label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" },
+		],
+		outputs: [{ label: "Result", content: "7", type: "text" }],
+	},
+	metrics: [
+		{
+			description: "Checks whether the predicted answer(s) is the same as the ground-truth answer(s).",
+			id: "Denotation Accuracy",
+		},
+	],
+	models: [
+		{
+			description:
+				"A table question answering model that is capable of neural SQL execution, i.e., employ TAPEX to execute a SQL query on a given table.",
+			id: "microsoft/tapex-base",
+		},
+		{
+			description: "A robust table question answering model.",
+			id: "google/tapas-base-finetuned-wtq",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that answers questions based on table CSV files.",
+			id: "katanaml/table-query",
+		},
+	],
+	summary: "Table Question Answering (Table QA) is the answering a question about an information on a given table.",
+	widgetModels: ["google/tapas-base-finetuned-wtq"],
+};
+
+export default taskData;
diff --git a/packages/tasks/src/tabular-classification/about.md b/packages/tasks/src/tabular-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9af38bceec2a4c49842dac8926950359485619b4
--- /dev/null
+++ b/packages/tasks/src/tabular-classification/about.md
@@ -0,0 +1,65 @@
+## About the Task
+
+Tabular classification is the task of assigning a label or class given a limited number of attributes. For example, the input can be data related to a customer (balance of the customer, the time being a customer, or more) and the output can be whether the customer will churn from the service or not.
+There are three types of categorical variables:
+
+- Binary variables: Variables that can take two values, like yes or no, open or closed. The task of predicting binary variables is called binary classification (see the sketch after this list).
+- Ordinal variables: Variables with a ranking relationship, e.g., good, average, and bad product reviews. The task of predicting ordinal variables is called ordinal classification.
+- Nominal variables: Variables with no ranking relationship among them, e.g., predicting an animal from its weight and height, where the categories are cat, dog, or bird. The task of predicting nominal variables is called multinomial classification.
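+
+As a quick illustration of binary classification on tabular data, here is a hedged sketch using `scikit-learn`; the churn-style dataset below is entirely made up.
+
+```python
+import pandas as pd
+from sklearn.ensemble import RandomForestClassifier
+
+# made-up customer data: balance, tenure, and whether the customer churned
+df = pd.DataFrame({
+    "balance": [1200, 50, 800, 10],
+    "months_as_customer": [24, 2, 12, 1],
+    "churned": [0, 1, 0, 1],
+})
+
+X, y = df[["balance", "months_as_customer"]], df["churned"]
+model = RandomForestClassifier(random_state=0).fit(X, y)
+
+# predict the churn class for a new customer
+model.predict(pd.DataFrame([[400, 6]], columns=X.columns))
+```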
+
+## Use Cases
+
+### Fraud Detection
+
+Tabular classification models can be used in detecting fraudulent credit card transactions, where the features could be the amount of the transaction and the account balance, and the target to predict could be whether the transaction is fraudulent or not. This is an example of binary classification.
+
+### Churn Prediction
+
+Tabular classification models can be used in predicting customer churn in telecommunication. An example dataset for the task is hosted [here](https://huggingface.co/datasets/scikit-learn/churn-prediction).
+
+## Model Hosting and Inference
+
+You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:
+
+- Easily use inference API,
+- Build neat UIs with one line of code,
+- Programmatically create model cards,
+- Securely serialize your scikit-learn model. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)
+
+You can push your model as follows:
+
+```python
+from skops import hub_utils
+# initialize a repository with a trained model
+local_repo = "/path_to_new_repo"
+hub_utils.init(model, dst=local_repo)
+# push to Hub!
+hub_utils.push("username/my-awesome-model", source=local_repo)
+```
+
+Once the model is pushed, you can infer easily.
+
+```python
+import skops.hub_utils as hub_utils
+import pandas as pd
+data = pd.DataFrame(your_data)
+# Load the model from the Hub
+res = hub_utils.get_model_output("username/my-awesome-model", data)
+```
+
+You can launch a UI for your model with only one line of code!
+
+```python
+import gradio as gr
+gr.Interface.load("huggingface/username/my-awesome-model").launch()
+```
+
+## Useful Resources
+
+- Check out the [scikit-learn organization](https://huggingface.co/scikit-learn) to learn more about different algorithms used for this task.
+- [Skops documentation](https://skops.readthedocs.io/en/latest/)
+- [Skops announcement blog](https://huggingface.co/blog/skops)
+- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
+- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.
+
+### Training your own model in just a few seconds
+
+We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.
diff --git a/packages/tasks/src/tabular-classification/data.ts b/packages/tasks/src/tabular-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5cc3f4b9a98066d9df0c62c8d284f70dfa883db8
--- /dev/null
+++ b/packages/tasks/src/tabular-classification/data.ts
@@ -0,0 +1,68 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A comprehensive curation of datasets covering all benchmarks.",
+			id: "inria-soda/tabular-benchmark",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				table: [
+					["Glucose", "Blood Pressure ", "Skin Thickness", "Insulin", "BMI"],
+					["148", "72", "35", "0", "33.6"],
+					["150", "50", "30", "0", "35.1"],
+					["141", "60", "29", "1", "39.2"],
+				],
+				type: "tabular",
+			},
+		],
+		outputs: [
+			{
+				table: [["Diabetes"], ["1"], ["1"], ["0"]],
+				type: "tabular",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description: "",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "Breast cancer prediction model based on decision trees.",
+			id: "scikit-learn/cancer-prediction-trees",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can predict defective products on a production line.",
+			id: "scikit-learn/tabular-playground",
+		},
+		{
+			description: "An application that compares various tabular classification techniques on different datasets.",
+			id: "scikit-learn/classification",
+		},
+	],
+	summary: "Tabular classification is the task of classifying a target category (a group) based on set of attributes.",
+	widgetModels: ["scikit-learn/tabular-playground"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/tabular-regression/about.md b/packages/tasks/src/tabular-regression/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb8861779047fe6073dbbf5baeca7655adddb66f
--- /dev/null
+++ b/packages/tasks/src/tabular-regression/about.md
@@ -0,0 +1,87 @@
+## About the Task
+
+Tabular regression is the task of predicting a numerical value given a set of attributes/features. _Tabular_ means that data is stored in a table (like an Excel sheet), and each sample is contained in its own row. The features used to predict our target can be both numerical and categorical. However, including categorical features often requires additional preprocessing/feature engineering (a few models do accept categorical features directly, like [CatBoost](https://catboost.ai/)). An example of tabular regression would be predicting the weight of a fish given its species and length.
+
+## Use Cases
+
+### Sales Prediction: a Use Case for Predicting a Continuous Target Variable
+
+Here the objective is to predict a continuous variable based on a set of input variables. For example, predicting the `sales` of an ice cream shop based on the `temperature` of the weather and the number of `hours` the shop was open. Here we can build a regression model with `temperature` and `hours` as input variables and `sales` as the target variable.
+
+### Missing Value Imputation for Other Tabular Tasks
+
+In real-world applications, due to human error or other reasons, some of the input values can be missing, or there might not be any recorded data. Considering the example above, say the shopkeeper's watch was broken and they forgot to record the `hours` for which the shop was open. This leads to a missing value in their dataset. In this case, missing values could be replaced with zero, or with the average number of hours for which the shop is kept open. Another approach is to use the `temperature` and `sales` variables to predict the `hours` variable.
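+
+One way to sketch this kind of model-based imputation is with `scikit-learn`'s `IterativeImputer`, which models each feature with missing values as a function of the other features; the shop data below is invented for illustration.
+
+```python
+import numpy as np
+import pandas as pd
+from sklearn.experimental import enable_iterative_imputer  # noqa: F401
+from sklearn.impute import IterativeImputer
+
+df = pd.DataFrame({
+    "temperature": [30.0, 32.0, 25.0, 28.0],
+    "hours": [8.0, 9.0, np.nan, 8.0],  # one missing `hours` entry
+    "sales": [120.0, 135.0, 70.0, 100.0],
+})
+
+# each feature with missing values is regressed on the other features
+imputed = IterativeImputer(random_state=0).fit_transform(df)
+```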
+
+## Model Training
+
+A simple regression model can be created using `sklearn` as follows:
+
+```python
+from sklearn.linear_model import LinearRegression
+
+# `data` is assumed to be a pandas DataFrame holding the features and target
+# set the input features
+X = data[["Feature 1", "Feature 2", "Feature 3"]]
+# set the target variable
+y = data["Target Variable"]
+# initialize the model
+model = LinearRegression()
+# fit the model
+model.fit(X, y)
+```
+
+## Model Hosting and Inference
+
+You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:
+
+- Easily use inference API,
+- Build neat UIs with one line of code,
+- Programmatically create model cards,
+- Securely serialize your models. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)
+
+You can push your model as follows:
+
+```python
+from skops import hub_utils
+# initialize a repository with a trained model
+local_repo = "/path_to_new_repo"
+hub_utils.init(model, dst=local_repo)
+# push to Hub!
+hub_utils.push("username/my-awesome-model", source=local_repo)
+```
+
+Once the model is pushed, you can infer easily.
+
+```python
+import skops.hub_utils as hub_utils
+import pandas as pd
+data = pd.DataFrame(your_data)
+# Load the model from the Hub
+res = hub_utils.get_model_output("username/my-awesome-model", data)
+```
+
+You can launch a UI for your model with only one line of code!
+
+```python
+import gradio as gr
+gr.Interface.load("huggingface/username/my-awesome-model").launch()
+```
+
+## Useful Resources
+
+- [Skops documentation](https://skops.readthedocs.io/en/stable/index.html)
+
+- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.
+- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
+
+- For starting with tabular regression:
+
+  - Doing [Exploratory Data Analysis](https://neptune.ai/blog/exploratory-data-analysis-for-tabular-data) for tabular data.
+    - The data considered here consists of details of Olympic athletes and medal results from Athens 1896 to Rio 2016.
+    - Here you can learn more about how to explore and analyze the data and visualize it in order to get a better understanding of the dataset.
+  - Building your [first ML model](https://www.kaggle.com/code/dansbecker/your-first-machine-learning-model).
+
+- Intermediate level tutorials on tabular regression:
+  - [A Short Chronology of Deep Learning for Tabular Data](https://sebastianraschka.com/blog/2022/deep-learning-for-tabular-data.html) by Sebastian Raschka.
+
+### Training your own model in just a few seconds
+
+We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.
+
+This page was made possible thanks to the efforts of [Brenden Connors](https://huggingface.co/brendenc) and [Ayush Bihani](https://huggingface.co/hsuyab).
diff --git a/packages/tasks/src/tabular-regression/data.ts b/packages/tasks/src/tabular-regression/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c5b78753861505113a4a5eab8ab31939dbcf4458
--- /dev/null
+++ b/packages/tasks/src/tabular-regression/data.ts
@@ -0,0 +1,57 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A comprehensive curation of datasets covering all benchmarks.",
+			id: "inria-soda/tabular-benchmark",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				table: [
+					["Car Name", "Horsepower", "Weight"],
+					["ford torino", "140", "3,449"],
+					["amc hornet", "97", "2,774"],
+					["toyota corolla", "65", "1,773"],
+				],
+				type: "tabular",
+			},
+		],
+		outputs: [
+			{
+				table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]],
+				type: "tabular",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "mse",
+		},
+		{
+			description:
+				"Coefficient of determination (or R-squared) is a measure of how well the model fits the data. Higher R-squared is considered a better fit.",
+			id: "r-squared",
+		},
+	],
+	models: [
+		{
+			description: "Fish weight prediction based on length measurements and species.",
+			id: "scikit-learn/Fish-Weight",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can predict weight of a fish based on set of attributes.",
+			id: "scikit-learn/fish-weight-prediction",
+		},
+	],
+	summary: "Tabular regression is the task of predicting a numerical value given a set of attributes.",
+	widgetModels: ["scikit-learn/Fish-Weight"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/tasksData.ts b/packages/tasks/src/tasksData.ts
new file mode 100644
index 0000000000000000000000000000000000000000..db2609971100e91d25ba68a075e3ce48f88c787f
--- /dev/null
+++ b/packages/tasks/src/tasksData.ts
@@ -0,0 +1,101 @@
+import { type PipelineType, PIPELINE_DATA } from "./pipelines";
+import type { TaskDataCustom, TaskData } from "./Types";
+
+import audioClassification from "./audio-classification/data";
+import audioToAudio from "./audio-to-audio/data";
+import automaticSpeechRecognition from "./automatic-speech-recognition/data";
+import conversational from "./conversational/data";
+import documentQuestionAnswering from "./document-question-answering/data";
+import featureExtraction from "./feature-extraction/data";
+import fillMask from "./fill-mask/data";
+import imageClassification from "./image-classification/data";
+import imageToImage from "./image-to-image/data";
+import imageToText from "./image-to-text/data";
+import imageSegmentation from "./image-segmentation/data";
+import objectDetection from "./object-detection/data";
+import depthEstimation from "./depth-estimation/data";
+import placeholder from "./placeholder/data";
+import reinforcementLearning from "./reinforcement-learning/data";
+import questionAnswering from "./question-answering/data";
+import sentenceSimilarity from "./sentence-similarity/data";
+import summarization from "./summarization/data";
+import tableQuestionAnswering from "./table-question-answering/data";
+import tabularClassification from "./tabular-classification/data";
+import tabularRegression from "./tabular-regression/data";
+import textToImage from "./text-to-image/data";
+import textToSpeech from "./text-to-speech/data";
+import tokenClassification from "./token-classification/data";
+import translation from "./translation/data";
+import textClassification from "./text-classification/data";
+import textGeneration from "./text-generation/data";
+import textToVideo from "./text-to-video/data";
+import unconditionalImageGeneration from "./unconditional-image-generation/data";
+import videoClassification from "./video-classification/data";
+import visualQuestionAnswering from "./visual-question-answering/data";
+import zeroShotClassification from "./zero-shot-classification/data";
+import zeroShotImageClassification from "./zero-shot-image-classification/data";
+import { TASKS_MODEL_LIBRARIES } from "./const";
+
+// To make comparisons easier, task order is the same as in const.ts
+// Tasks set to undefined won't have an associated task page.
+// Tasks that call getData() without the second argument will
+// have a "placeholder" page.
+export const TASKS_DATA: Record<PipelineType, TaskData | undefined> = {
+	"audio-classification": getData("audio-classification", audioClassification),
+	"audio-to-audio": getData("audio-to-audio", audioToAudio),
+	"automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition),
+	conversational: getData("conversational", conversational),
+	"depth-estimation": getData("depth-estimation", depthEstimation),
+	"document-question-answering": getData("document-question-answering", documentQuestionAnswering),
+	"feature-extraction": getData("feature-extraction", featureExtraction),
+	"fill-mask": getData("fill-mask", fillMask),
+	"graph-ml": undefined,
+	"image-classification": getData("image-classification", imageClassification),
+	"image-segmentation": getData("image-segmentation", imageSegmentation),
+	"image-to-image": getData("image-to-image", imageToImage),
+	"image-to-text": getData("image-to-text", imageToText),
+	"multiple-choice": undefined,
+	"object-detection": getData("object-detection", objectDetection),
+	"video-classification": getData("video-classification", videoClassification),
+	other: undefined,
+	"question-answering": getData("question-answering", questionAnswering),
+	"reinforcement-learning": getData("reinforcement-learning", reinforcementLearning),
+	robotics: undefined,
+	"sentence-similarity": getData("sentence-similarity", sentenceSimilarity),
+	summarization: getData("summarization", summarization),
+	"table-question-answering": getData("table-question-answering", tableQuestionAnswering),
+	"table-to-text": undefined,
+	"tabular-classification": getData("tabular-classification", tabularClassification),
+	"tabular-regression": getData("tabular-regression", tabularRegression),
+	"tabular-to-text": undefined,
+	"text-classification": getData("text-classification", textClassification),
+	"text-generation": getData("text-generation", textGeneration),
+	"text-retrieval": undefined,
+	"text-to-image": getData("text-to-image", textToImage),
+	"text-to-speech": getData("text-to-speech", textToSpeech),
+	"text-to-audio": undefined,
+	"text-to-video": getData("text-to-video", textToVideo),
+	"text2text-generation": undefined,
+	"time-series-forecasting": undefined,
+	"token-classification": getData("token-classification", tokenClassification),
+	translation: getData("translation", translation),
+	"unconditional-image-generation": getData("unconditional-image-generation", unconditionalImageGeneration),
+	"visual-question-answering": getData("visual-question-answering", visualQuestionAnswering),
+	"voice-activity-detection": undefined,
+	"zero-shot-classification": getData("zero-shot-classification", zeroShotClassification),
+	"zero-shot-image-classification": getData("zero-shot-image-classification", zeroShotImageClassification),
+} as const;
+
+/**
+ * Return the whole TaskData object for a certain task.
+ * If the partialTaskData argument is left undefined,
+ * the default placeholder data will be used.
+ */
+function getData(type: PipelineType, partialTaskData: TaskDataCustom = placeholder): TaskData {
+	return {
+		...partialTaskData,
+		id: type,
+		label: PIPELINE_DATA[type].name,
+		libraries: TASKS_MODEL_LIBRARIES[type],
+	};
+}
diff --git a/packages/tasks/src/text-classification/about.md b/packages/tasks/src/text-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..448eb7138209974bbb5d282fb573f1c8197e38f3
--- /dev/null
+++ b/packages/tasks/src/text-classification/about.md
@@ -0,0 +1,172 @@
+## Use Cases
+
+### Sentiment Analysis on Customer Reviews
+
+You can track the sentiment of your customers from product reviews using sentiment analysis models. This can help understand churn and retention by grouping reviews by sentiment, to later analyze the text and make strategic decisions based on this knowledge.
+
+## Task Variants
+
+### Natural Language Inference (NLI)
+
+In NLI the model determines the relationship between two given texts. Concretely, the model takes a premise and a hypothesis and returns a class that can either be:
+
+- **entailment**, which means the hypothesis is true given the premise.
+- **contradiction**, which means the hypothesis is false given the premise.
+- **neutral**, which means there's no relation between the hypothesis and the premise.
+
+The benchmark dataset for this task is GLUE (General Language Understanding Evaluation). NLI models have different variants, such as Multi-Genre NLI, Question NLI and Winograd NLI.
+
+### Multi-Genre NLI (MNLI)
+
+MNLI is used for general NLI. Here are some examples:
+
+```
+Example 1:
+    Premise: A man inspects the uniform of a figure in some East Asian country.
+    Hypothesis: The man is sleeping.
+    Label: Contradiction
+
+Example 2:
+    Premise: Soccer game with multiple males playing.
+    Hypothesis: Some men are playing a sport.
+    Label: Entailment
+```
+
+#### Inference
+
+You can use the 🤗 Transformers library `text-classification` pipeline to infer with NLI models.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("text-classification", model = "roberta-large-mnli")
+classifier("A soccer game with multiple males playing. Some men are playing a sport.")
+## [{'label': 'ENTAILMENT', 'score': 0.98}]
+```
+
+### Question Natural Language Inference (QNLI)
+
+QNLI is the task of determining if the answer to a certain question can be found in a given document. If the answer can be found, the label is “entailment”. If the answer cannot be found, the label is “not entailment”.
+
+```
+Question: What percentage of marine life died during the extinction?
+Sentence: It is also known as the “Great Dying” because it is considered the largest mass extinction in the Earth’s history.
+Label: not entailment
+
+Question: Who was the London Weekend Television’s Managing Director?
+Sentence: The managing director of London Weekend Television (LWT), Greg Dyke, met with the representatives of the "big five" football clubs in England in 1990.
+Label: entailment
+```
+
+#### Inference
+
+You can use the 🤗 Transformers library `text-classification` pipeline to infer with QNLI models. The model returns the label and the confidence.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("text-classification", model = "cross-encoder/qnli-electra-base")
+classifier("Where is the capital of France?, Paris is the capital of France.")
+## [{'label': 'entailment', 'score': 0.997}]
+```
+
+### Sentiment Analysis
+
+In Sentiment Analysis, the classes can be polarities like positive, negative, neutral, or sentiments such as happiness or anger.
+
+#### Inference
+
+You can use the 🤗 Transformers library with the `sentiment-analysis` pipeline to infer with Sentiment Analysis models. The model returns the label with the score.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("sentiment-analysis")
+classifier("I loved Star Wars so much!")
+## [{'label': 'POSITIVE', 'score': 0.99}]
+```
+
+### Quora Question Pairs
+
+Quora Question Pairs models assess whether two provided questions are paraphrases of each other. The model takes two questions and returns a binary value, with 0 being mapped to “not paraphrase” and 1 to “paraphrase”. The benchmark dataset is [Quora Question Pairs](https://huggingface.co/datasets/glue/viewer/qqp/test) inside the [GLUE benchmark](https://huggingface.co/datasets/glue). The dataset consists of question pairs and their labels.
+
+```
+Question1: “How can I increase the speed of my internet connection while using a VPN?”
+Question2: “How can Internet speed be increased by hacking through DNS?”
+Label: Not paraphrase
+
+Question1: “What can make Physics easy to learn?”
+Question2: “How can you make physics easy to learn?”
+Label: Paraphrase
+```
+
+#### Inference
+
+You can use the 🤗 Transformers library `text-classification` pipeline to infer with QQP models.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("text-classification", model = "textattack/bert-base-uncased-QQP")
+classifier("Which city is the capital of France?, Where is the capital of France?")
+## [{'label': 'paraphrase', 'score': 0.998}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text classification models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textClassification({
+	model: "distilbert-base-uncased-finetuned-sst-2-english",
+	inputs: "I love this movie!",
+});
+```
+
+### Grammatical Correctness
+
+Linguistic Acceptability is the task of assessing the grammatical acceptability of a sentence. The classes in this task are “acceptable” and “unacceptable”. The benchmark dataset used for this task is [Corpus of Linguistic Acceptability (CoLA)](https://huggingface.co/datasets/glue/viewer/cola/test). The dataset consists of texts and their labels.
+
+```
+Example: Books were sent to each other by the students.
+Label: Unacceptable
+
+Example: She voted for herself.
+Label: Acceptable
+```
+
+#### Inference
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("text-classification", model = "textattack/distilbert-base-uncased-CoLA")
+classifier("I will walk to home when I went through the bus.")
+##  [{'label': 'unacceptable', 'score': 0.95}]
+```
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Fine-tuning a Text Classification Model](https://huggingface.co/course/chapter3/1?fw=pt)
+- [Getting Started with Sentiment Analysis using Python](https://huggingface.co/blog/sentiment-analysis-python)
+- [Sentiment Analysis on Encrypted Data with Homomorphic Encryption](https://huggingface.co/blog/sentiment-analysis-fhe)
+- [Leveraging Hugging Face for complex text classification use cases](https://huggingface.co/blog/classification-use-cases)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/text_classification.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb)
+- [Flax](https://github.com/huggingface/notebooks/blob/master/examples/text_classification_flax.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification)
+
+### Documentation
+
+- [Text classification task guide](https://huggingface.co/docs/transformers/tasks/sequence_classification)
diff --git a/packages/tasks/src/text-classification/data.ts b/packages/tasks/src/text-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7893753caae5de3036692e32b8135a47f139924b
--- /dev/null
+++ b/packages/tasks/src/text-classification/data.ts
@@ -0,0 +1,91 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A widely used dataset used to benchmark multiple variants of text classification.",
+			id: "glue",
+		},
+		{
+			description: "A text classification dataset used to benchmark natural language inference models",
+			id: "snli",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "I love Hugging Face!",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "POSITIVE",
+						score: 0.9,
+					},
+					{
+						label: "NEUTRAL",
+						score: 0.1,
+					},
+					{
+						label: "NEGATIVE",
+						score: 0.0,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description:
+				"The F1 metric is the harmonic mean of the precision and recall. It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall)",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "A robust model trained for sentiment analysis.",
+			id: "distilbert-base-uncased-finetuned-sst-2-english",
+		},
+		{
+			description: "Multi-genre natural language inference model.",
+			id: "roberta-large-mnli",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can classify financial sentiment.",
+			id: "IoannisTr/Tech_Stocks_Trading_Assistant",
+		},
+		{
+			description: "A dashboard that contains various text classification tasks.",
+			id: "miesnerjacob/Multi-task-NLP",
+		},
+		{
+			description: "An application that analyzes user reviews in healthcare.",
+			id: "spacy/healthsea-demo",
+		},
+	],
+	summary:
+		"Text Classification is the task of assigning a label or class to a given text. Some use cases are sentiment analysis, natural language inference, and assessing grammatical correctness.",
+	widgetModels: ["distilbert-base-uncased-finetuned-sst-2-english"],
+	youtubeId: "leNG9fN9FQU",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-generation/about.md b/packages/tasks/src/text-generation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..c8ed8120f65903a669af705beddcdb00ea814422
--- /dev/null
+++ b/packages/tasks/src/text-generation/about.md
@@ -0,0 +1,129 @@
+This task covers guides on both [text-generation](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads) and [text-to-text generation](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) models. Popular large language models that are used for chats or following instructions are also covered in this task. You can find the list of selected open-source large language models [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), ranked by their performance scores.
+
+## Use Cases
+
+### Instruction Models
+
+A model trained for text generation can be later adapted to follow instructions. One of the most used open-source models for instruction is OpenAssistant, which you can try [at Hugging Chat](https://huggingface.co/chat).
+
+### Code Generation
+
+A Text Generation model, also known as a causal language model, can be trained on code from scratch to help programmers with their repetitive coding tasks. One of the most popular open-source models for code generation is StarCoder, which can generate code in 80+ languages. You can try it [here](https://huggingface.co/spaces/bigcode/bigcode-playground).
+
+### Stories Generation
+
+A story generation model can receive an input like "Once upon a time" and proceed to create a story-like text based on those first words. You can try [this application](https://huggingface.co/spaces/mosaicml/mpt-7b-storywriter), which contains a model trained on story generation by MosaicML.
+
+If your generative model training data is different from your use case, you can train a causal language model from scratch. Learn how to do it in the free transformers [course](https://huggingface.co/course/chapter7/6?fw=pt)!
+
+## Task Variants
+
+### Completion Generation Models
+
+A popular variant of Text Generation models predicts the next word given a sequence of words. Word by word, a longer text is formed, enabling the model to, for example:
+
+- Given an incomplete sentence, complete it.
+- Continue a story given the first sentences.
+- Provided a code description, generate the code.
+
+The most popular models for this task are GPT-based models or [Llama series](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). These models are trained on data that has no labels, so you just need plain text to train your own model. You can train text generation models to generate a wide variety of documents, from code to stories.
+
+### Text-to-Text Generation Models
+
+These models are trained to learn the mapping between a pair of texts (e.g. translation from one language to another). The most popular variants of these models are [FLAN-T5](https://huggingface.co/google/flan-t5-xxl) and [BART](https://huggingface.co/docs/transformers/model_doc/bart). Because Text-to-Text models are trained with multi-tasking capabilities, they can accomplish a wide range of tasks, including summarization, translation, and text classification.
+
+## Inference
+
+You can use the 🤗 Transformers library `text-generation` pipeline to do inference with Text Generation models. It takes an incomplete text and returns multiple outputs with which the text can be completed.
+
+```python
+from transformers import pipeline
+generator = pipeline("text-generation", model="gpt2")
+generator("Hello, I'm a language model", max_length=30, num_return_sequences=3)
+## [{'generated_text': "Hello, I'm a language modeler. So while writing this, when I went out to meet my wife or come home she told me that my"},
+##  {'generated_text': "Hello, I'm a language modeler. I write and maintain software in Python. I love to code, and that includes coding things that require writing"}, ...
+```
+
+[Text-to-Text generation models](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) have a separate pipeline called `text2text-generation`. This pipeline takes an input containing the sentence including the task and returns the output of the accomplished task.
+
+```python
+from transformers import pipeline
+
+text2text_generator = pipeline("text2text-generation")
+text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
+[{'generated_text': 'the answer to life, the universe and everything'}]
+
+text2text_generator("translate from English to French: I'm very happy")
+[{'generated_text': 'Je suis très heureux'}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text generation models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textGeneration({
+	model: "gpt2",
+	inputs: "Hello, I'm a language model",
+});
+```
+
+## Text Generation Inference
+
+[Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is an open-source toolkit for serving LLMs tackling challenges such as response time. TGI powers inference solutions like [Inference Endpoints](https://huggingface.co/inference-endpoints) and [Hugging Chat](https://huggingface.co/chat/), as well as multiple community projects. You can use it to deploy any supported open-source large language model of your choice.
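+
+Once a TGI server is running, you can query it from Python, for example with the `InferenceClient` from the `huggingface_hub` library. This is a minimal sketch; the local endpoint URL and the generation parameters below are illustrative assumptions.
+
+```python
+from huggingface_hub import InferenceClient
+
+# Point the client at a running TGI endpoint (assumed to be local here).
+client = InferenceClient(model="http://127.0.0.1:8080")
+
+output = client.text_generation("Once upon a time,", max_new_tokens=50)
+print(output)
+```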
+
+## ChatUI Spaces
+
+Hugging Face Spaces includes templates to easily deploy your own instance of a specific application. [ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface that enables serving conversational interfaces for large language models, and it can be deployed to Spaces and customized with only a few clicks. TGI powers these Spaces under the hood for faster inference. Learn more about the template [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui) and create your own large language model instance [here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).
+
+![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/os_llms/docker_chat.png)
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+### Tools within Hugging Face Ecosystem
+
+- You can use [PEFT](https://github.com/huggingface/peft) to adapt large language models in an efficient way.
+- [ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface for chatting with large language models.
+- [text-generation-inference](https://github.com/huggingface/text-generation-inference) is a toolkit for serving large language models.
+- [HuggingChat](https://huggingface.co/chat/) is a chat interface powered by Hugging Face to chat with powerful models like Llama 2 70B.
+
+### Documentation
+
+- [PEFT documentation](https://huggingface.co/docs/peft/index)
+- [ChatUI Docker Spaces](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui)
+- [Causal language modeling task guide](https://huggingface.co/docs/transformers/tasks/language_modeling)
+- [Text generation strategies](https://huggingface.co/docs/transformers/generation_strategies)
+
+### Course and Blogs
+
+- [Course Chapter on Training a causal language model from scratch](https://huggingface.co/course/chapter7/6?fw=pt)
+- [T0 Discussion with Victor Sanh](https://www.youtube.com/watch?v=Oy49SCW_Xpw&ab_channel=HuggingFace)
+- [Hugging Face Course Workshops: Pretraining Language Models & CodeParrot](https://www.youtube.com/watch?v=ExUR7w6xe94&ab_channel=HuggingFace)
+- [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot)
+- [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate)
+- [Guiding Text Generation with Constrained Beam Search in 🤗 Transformers](https://huggingface.co/blog/constrained-beam-search)
+- [Code generation with Hugging Face](https://huggingface.co/spaces/codeparrot/code-generation-models)
+- [🌸 Introducing The World's Largest Open Multilingual Language Model: BLOOM 🌸](https://huggingface.co/blog/bloom)
+- [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed)
+- [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate)
+- [Assisted Generation: a new direction toward low-latency text generation](https://huggingface.co/blog/assisted-generation)
+- [Introducing RWKV - An RNN with the advantages of a transformer](https://huggingface.co/blog/rwkv)
+- [Creating a Coding Assistant with StarCoder](https://huggingface.co/blog/starchat-alpha)
+- [StarCoder: A State-of-the-Art LLM for Code](https://huggingface.co/blog/starcoder)
+- [Open-Source Text Generation & LLM Ecosystem at Hugging Face](https://huggingface.co/blog/os-llms)
+- [Llama 2 is at Hugging Face](https://huggingface.co/blog/llama2)
+
+### Notebooks
+
+- [Training a CLM in Flax](https://github.com/huggingface/notebooks/blob/master/examples/causal_language_modeling_flax.ipynb)
+- [Training a CLM in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb)
+- [Training a CLM in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb)
+
+### Scripts for training
+
+- [Training a CLM in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling)
+- [Training a CLM in TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling)
+- [Text Generation in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation)
diff --git a/packages/tasks/src/text-generation/data.ts b/packages/tasks/src/text-generation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..15b83ef0a8ed9a07365e86d67a0c606f07ec97cd
--- /dev/null
+++ b/packages/tasks/src/text-generation/data.ts
@@ -0,0 +1,126 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A large multilingual dataset of text crawled from the web.",
+			id: "mc4",
+		},
+		{
+			description:
+				"Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.",
+			id: "the_pile",
+		},
+		{
+			description: "A crowd-sourced instruction dataset to develop an AI assistant.",
+			id: "OpenAssistant/oasst1",
+		},
+		{
+			description: "A crowd-sourced instruction dataset created by Databricks employees.",
+			id: "databricks/databricks-dolly-15k",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "Once upon a time,",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Output",
+				content:
+					"Once upon a time, we knew that our ancestors were on the verge of extinction. The great explorers and poets of the Old World, from Alexander the Great to Chaucer, are dead and gone. A good many of our ancient explorers and poets have",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words",
+			id: "Cross Entropy",
+		},
+		{
+			description:
+				"The Perplexity metric is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance",
+			id: "Perplexity",
+		},
+	],
+	models: [
+		{
+			description: "A large language model trained for text generation.",
+			id: "bigscience/bloom-560m",
+		},
+		{
+			description: "A large code generation model that can generate code in 80+ languages.",
+			id: "bigcode/starcoder",
+		},
+		{
+			description: "A model trained to follow instructions, uses Pythia-12b as base model.",
+			id: "databricks/dolly-v2-12b",
+		},
+		{
+			description: "A model trained to follow instructions curated by community, uses Pythia-12b as base model.",
+			id: "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+		},
+		{
+			description: "A large language model trained to generate text in English.",
+			id: "stabilityai/stablelm-tuned-alpha-7b",
+		},
+		{
+			description: "A model trained to follow instructions, based on mosaicml/mpt-7b.",
+			id: "mosaicml/mpt-7b-instruct",
+		},
+		{
+			description: "A large language model trained to generate text in English.",
+			id: "EleutherAI/pythia-12b",
+		},
+		{
+			description: "A large text-to-text model trained to follow instructions.",
+			id: "google/flan-ul2",
+		},
+		{
+			description: "A large and powerful text generation model.",
+			id: "tiiuae/falcon-40b",
+		},
+		{
+			description: "State-of-the-art open-source large language model.",
+			id: "meta-llama/Llama-2-70b-hf",
+		},
+	],
+	spaces: [
+		{
+			description: "A robust text generation model that can perform various tasks through natural language prompting.",
+			id: "huggingface/bloom_demo",
+		},
+		{
+			description: "A text generation application that can write code in 80+ languages.",
+			id: "bigcode/bigcode-playground",
+		},
+		{
+			description: "A text generation application for conversations.",
+			id: "h2oai/h2ogpt-chatbot",
+		},
+		{
+			description: "A text generation application that combines OpenAI and Hugging Face models.",
+			id: "microsoft/HuggingGPT",
+		},
+		{
+			description: "A text generation application that uses StableLM-tuned-alpha-7b.",
+			id: "stabilityai/stablelm-tuned-alpha-chat",
+		},
+		{
+			description: "A UI that uses StableLM-tuned-alpha-7b.",
+			id: "togethercomputer/OpenChatKit",
+		},
+	],
+	summary:
+		"Generating text is the task of producing new text. These models can, for example, fill in incomplete text or paraphrase.",
+	widgetModels: ["tiiuae/falcon-7b-instruct"],
+	youtubeId: "Vpjb1lu0MDk",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-to-image/about.md b/packages/tasks/src/text-to-image/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7c79fb43a2483720638c168e9f9cadb9bd9aa5d
--- /dev/null
+++ b/packages/tasks/src/text-to-image/about.md
@@ -0,0 +1,66 @@
+## Use Cases
+
+### Data Generation
+
+Businesses can generate data for their use cases by inputting text and getting image outputs.
+
+### Immersive Conversational Chatbots
+
+Chatbots can be made more immersive if they provide contextual images based on the input provided by the user.
+
+### Creative Ideas for Fashion Industry
+
+Different patterns can be generated to obtain unique pieces of fashion. Text-to-image models make it easier for designers to conceptualize their designs before actually implementing them.
+
+### Architecture Industry
+
+Architects can utilize the models to construct an environment based on the requirements of the floor plan. This can also include the furniture that has to be placed in that environment.
+
+## Task Variants
+
+You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/text-to-image/about.md).
+
+## Inference
+
+You can use diffusers pipelines to infer with `text-to-image` models.
+
+```python
+import torch
+from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
+
+model_id = "stabilityai/stable-diffusion-2"
+scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+image = pipe(prompt).images[0]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text-to-image models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textToImage({
+	model: "stabilityai/stable-diffusion-2",
+	inputs: "award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]",
+	parameters: {
+		negative_prompt: "blurry",
+	},
+});
+```
+
+## Useful Resources
+
+- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class)
+- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index)
+- [Text-to-Image Generation](https://huggingface.co/docs/diffusers/using-diffusers/conditional_image_generation)
+- [MinImagen - Build Your Own Imagen Text-to-Image Model](https://www.assemblyai.com/blog/minimagen-build-your-own-imagen-text-to-image-model/)
+- [Using LoRA for Efficient Stable Diffusion Fine-Tuning](https://huggingface.co/blog/lora)
+- [Using Stable Diffusion with Core ML on Apple Silicon](https://huggingface.co/blog/diffusers-coreml)
+- [A guide on Vector Quantized Diffusion](https://huggingface.co/blog/vq-diffusion)
+- [🧨 Stable Diffusion in JAX/Flax](https://huggingface.co/blog/stable_diffusion_jax)
+- [Running IF with 🧨 diffusers on a Free Tier Google Colab](https://huggingface.co/blog/if)
+
+This page was made possible thanks to the efforts of [Ishan Dutta](https://huggingface.co/ishandutta), [Enrique Elias Ubaldo](https://huggingface.co/herrius) and [Oğuz Akif](https://huggingface.co/oguzakif).
diff --git a/packages/tasks/src/text-to-image/data.ts b/packages/tasks/src/text-to-image/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0a6c41ac13abf5149fef865f32fc4afee8f041a1
--- /dev/null
+++ b/packages/tasks/src/text-to-image/data.ts
@@ -0,0 +1,94 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit.",
+			id: "red_caps",
+		},
+		{
+			description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.",
+			id: "conceptual_captions",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "A city above clouds, pastel colors, Victorian style",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "image.jpeg",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The Inception Score (IS) measure assesses diversity and meaningfulness. It uses a generated image sample to predict its label. A higher score signifies more diverse and meaningful images.",
+			id: "IS",
+		},
+		{
+			description:
+				"The Fréchet Inception Distance (FID) calculates the distance between distributions between synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.",
+			id: "FID",
+		},
+		{
+			description:
+				"R-precision assesses how the generated image aligns with the provided text description. It uses the generated images as queries to retrieve relevant text descriptions. The top 'r' relevant descriptions are selected and used to calculate R-precision as r/R, where 'R' is the number of ground truth descriptions associated with the generated images. A higher R-precision value indicates a better model.",
+			id: "R-Precision",
+		},
+	],
+	models: [
+		{
+			description:
+				"A latent text-to-image diffusion model capable of generating photo-realistic images given any text input.",
+			id: "CompVis/stable-diffusion-v1-4",
+		},
+		{
+			description:
+				"A model that can be used to generate images based on text prompts. The DALL·E Mega model is the largest version of DALLE Mini.",
+			id: "dalle-mini/dalle-mega",
+		},
+		{
+			description: "A text-to-image model that can generate coherent text inside image.",
+			id: "DeepFloyd/IF-I-XL-v1.0",
+		},
+		{
+			description: "A powerful text-to-image model.",
+			id: "kakaobrain/karlo-v1-alpha",
+		},
+	],
+	spaces: [
+		{
+			description: "A powerful text-to-image application.",
+			id: "stabilityai/stable-diffusion",
+		},
+		{
+			description: "A text-to-image application that can generate coherent text inside the image.",
+			id: "DeepFloyd/IF",
+		},
+		{
+			description: "A powerful text-to-image application that uses the Karlo model.",
+			id: "kakaobrain/karlo",
+		},
+		{
+			description: "A text-to-image application that generates 3D representations.",
+			id: "hysts/Shap-E",
+		},
+		{
+			description: "A strong application for `text-to-image`, `image-to-image` and image inpainting.",
+			id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
+		},
+	],
+	summary:
+		"Generates images from input text. These models can be used to generate and modify images based on text prompts.",
+	widgetModels: ["CompVis/stable-diffusion-v1-4"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-to-speech/about.md b/packages/tasks/src/text-to-speech/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..5b2fc6530b4164418e68feb8ac62f0d6bc70d949
--- /dev/null
+++ b/packages/tasks/src/text-to-speech/about.md
@@ -0,0 +1,62 @@
+## Use Cases
+
+Text-to-Speech (TTS) models can be used in any speech-enabled application that requires converting text to speech that imitates the human voice.
+
+### Voice Assistants
+
+TTS models are used to create voice assistants on smart devices. These models are a better alternative to concatenative methods, in which the assistant is built by recording sounds and mapping them, since TTS model outputs contain elements of natural speech, such as emphasis.
+
+### Announcement Systems
+
+TTS models are widely used in airport and public transportation announcement systems to convert announcement text into speech.
+
+## Inference API
+
+The Hub contains over [1500 TTS models](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using the Inference API. Here is a simple code snippet to get you started:
+
+```python
+import requests
+
+API_URL = "https://api-inference.huggingface.co/models/microsoft/speecht5_tts"
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+
+def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.content  # raw audio bytes
+
+output = query({"inputs": "This is a test"})
+```
+
+You can also use libraries such as [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=text-to-speech&sort=downloads) or [transformers](https://huggingface.co/models?pipeline_tag=text-to-speech&library=transformers&sort=trending) if you want to handle the Inference directly.
+
+## Direct Inference
+
+Now, you can also use the Text-to-Speech pipeline in Transformers to synthesize high-quality speech.
+
+```python
+from transformers import pipeline
+
+synthesizer = pipeline("text-to-speech", "suno/bark")
+
+synthesizer("Look I am generating speech in three lines of code!")
+```
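+
+The pipeline returns the generated waveform together with its sampling rate. A minimal sketch for saving the output to a file, assuming `scipy` is installed (the call squeezes the audio array to 1-D in case the model returns a leading channel dimension):
+
+```python
+import numpy as np
+import scipy.io.wavfile
+
+# `speech` is the dict returned by the pipeline above.
+scipy.io.wavfile.write("speech.wav", rate=speech["sampling_rate"], data=np.squeeze(speech["audio"]))
+```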
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text-to-speech models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textToSpeech({
+	model: "facebook/mms-tts",
+	inputs: "text to generate speech from",
+});
+```
+
+## Useful Resources
+
+- [ML for Audio Study Group - Text to Speech Deep Dive](https://www.youtube.com/watch?v=aLBedWj-5CQ)
+- [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5).
+- [A guide on Fine-tuning Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper)
+- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
diff --git a/packages/tasks/src/text-to-speech/data.ts b/packages/tasks/src/text-to-speech/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..031aa96d3074df0987e50d24211c1046e2149c4f
--- /dev/null
+++ b/packages/tasks/src/text-to-speech/data.ts
@@ -0,0 +1,69 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Thousands of short audio clips of a single speaker.",
+			id: "lj_speech",
+		},
+		{
+			description: "Multi-speaker English dataset.",
+			id: "LibriTTS",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "I love audio models on the Hub!",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "audio.wav",
+				type: "audio",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "The Mel Cepstral Distortion (MCD) metric is used to calculate the quality of generated speech.",
+			id: "mel cepstral distortion",
+		},
+	],
+	models: [
+		{
+			description: "A powerful TTS model.",
+			id: "suno/bark",
+		},
+		{
+			description: "A massively multi-lingual TTS model.",
+			id: "facebook/mms-tts",
+		},
+		{
+			description: "An end-to-end speech synthesis model.",
+			id: "microsoft/speecht5_tts",
+		},
+	],
+	spaces: [
+		{
+			description: "An application for generate highly realistic, multilingual speech.",
+			id: "suno/bark",
+		},
+		{
+			description: "An application that contains multiple speech synthesis models for various languages and accents.",
+			id: "coqui/CoquiTTS",
+		},
+		{
+			description: "An application that synthesizes speech for various speaker types.",
+			id: "Matthijs/speecht5-tts-demo",
+		},
+	],
+	summary:
+		"Text-to-Speech (TTS) is the task of generating natural sounding speech given text input. TTS models can be extended to have a single model that generates speech for multiple speakers and multiple languages.",
+	widgetModels: ["microsoft/speecht5_tts"],
+	youtubeId: "NW62DpzJ274",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-to-video/about.md b/packages/tasks/src/text-to-video/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..898d638c264aa8219cdc3a71d1a4562de0d084b8
--- /dev/null
+++ b/packages/tasks/src/text-to-video/about.md
@@ -0,0 +1,41 @@
+## Use Cases
+
+### Script-based Video Generation
+
+Text-to-video models can be used to create short-form video content from a provided text script. These models can be used to create engaging and informative marketing videos. For example, a company could use a text-to-video model to create a video that explains how their product works.
+
+### Content format conversion
+
+Text-to-video models can be used to generate videos from long-form text, including blog posts, articles, and text files. Text-to-video models can be used to create educational videos that are more engaging and interactive. An example of this is creating a video that explains a complex concept from an article.
+
+### Voice-overs and Speech
+
+Text-to-video models can be used to create an AI newscaster to deliver daily news, or for a film-maker to create a short film or a music video.
+
+## Task Variants
+
+Text-to-video models have different variants based on inputs and outputs.
+
+### Text-to-video Editing
+
+One task variant is text-based video editing: generating changes to a video's style and local attributes from a text description. Text-to-video editing models can make it easier to perform tasks like cropping, stabilization, color correction, resizing and audio editing consistently.
+
+### Text-to-video Search
+
+Text-to-video search is the task of retrieving videos that are relevant to a given text query. This can be challenging, as videos are a complex medium that can contain a lot of information. By using semantic analysis to extract the meaning of the text query, visual analysis to extract features from the videos, such as the objects and actions that are present in the video, and temporal analysis to categorize relationships between the objects and actions in the video, we can determine which videos are most likely to be relevant to the text query.
+
+### Text-driven Video Prediction
+
+Text-driven video prediction is the task of generating a video sequence from a text description. The text description can be anything from a simple sentence to a detailed story. The goal of this task is to generate a video that is both visually realistic and semantically consistent with the text description.
+
+### Video Translation
+
+Text-to-video translation models can translate videos from one language to another, or allow a multilingual text-video model to be queried with non-English sentences. This can be useful for people who want to watch videos in a language they don't understand, especially when multi-lingual captions are available for training.
+
+## Inference
+
+Contribute an inference snippet for text-to-video here!
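+
+In the meantime, here is a minimal sketch using the 🧨 Diffusers library with the `damo-vilab/text-to-video-ms-1.7b` checkpoint listed on this page; the precision settings and number of inference steps are illustrative assumptions.
+
+```python
+import torch
+from diffusers import DiffusionPipeline
+from diffusers.utils import export_to_video
+
+# Load a pre-trained text-to-video diffusion pipeline in half precision.
+pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
+pipe = pipe.to("cuda")
+
+prompt = "Darth Vader is surfing on the waves."
+video_frames = pipe(prompt, num_inference_steps=25).frames
+video_path = export_to_video(video_frames)  # writes a video file and returns its path
+```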
+
+## Useful Resources
+
+In this area, you can insert useful resources about how to train or use a model for this task.
+
+- [Text-to-Video: The Task, Challenges and the Current State](https://huggingface.co/blog/text-to-video)
diff --git a/packages/tasks/src/text-to-video/data.ts b/packages/tasks/src/text-to-video/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..17fba749c0eaba78b38ebdc112f55fc60336965e
--- /dev/null
+++ b/packages/tasks/src/text-to-video/data.ts
@@ -0,0 +1,102 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Microsoft Research Video to Text is a large-scale dataset for open domain video captioning",
+			id: "iejMac/CLIP-MSR-VTT",
+		},
+		{
+			description: "UCF101 Human Actions dataset consists of 13,320 video clips from YouTube, with 101 classes.",
+			id: "quchenyuan/UCF101-ZIP",
+		},
+		{
+			description: "A high-quality dataset for human action recognition in YouTube videos.",
+			id: "nateraw/kinetics",
+		},
+		{
+			description: "A dataset of video clips of humans performing pre-defined basic actions with everyday objects.",
+			id: "HuggingFaceM4/something_something_v2",
+		},
+		{
+			description:
+				"This dataset consists of text-video pairs and contains noisy samples with irrelevant video descriptions",
+			id: "HuggingFaceM4/webvid",
+		},
+		{
+			description: "A dataset of short Flickr videos for the temporal localization of events with descriptions.",
+			id: "iejMac/CLIP-DiDeMo",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "Darth Vader is surfing on the waves.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "text-to-video-output.gif",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Inception Score uses an image classification model that predicts class labels and evaluates how distinct and diverse the images are. A higher score indicates better video generation.",
+			id: "is",
+		},
+		{
+			description:
+				"Frechet Inception Distance uses an image classification model to obtain image embeddings. The metric compares mean and standard deviation of the embeddings of real and generated images. A smaller score indicates better video generation.",
+			id: "fid",
+		},
+		{
+			description:
+				"Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.",
+			id: "fvd",
+		},
+		{
+			description:
+				"CLIPSIM measures similarity between video frames and text using an image-text similarity model. A higher score indicates better video generation.",
+			id: "clipsim",
+		},
+	],
+	models: [
+		{
+			description: "A strong model for video generation.",
+			id: "PAIR/text2video-zero-controlnet-canny-arcane",
+		},
+		{
+			description: "A robust model for text-to-video generation.",
+			id: "damo-vilab/text-to-video-ms-1.7b",
+		},
+		{
+			description: "A text-to-video generation model with high quality and smooth outputs.",
+			id: "cerspense/zeroscope_v2_576w",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that generates video from text.",
+			id: "fffiloni/zeroscope",
+		},
+		{
+			description: "An application that generates video from image and text.",
+			id: "TempoFunk/makeavid-sd-jax",
+		},
+		{
+			description: "An application that generates videos from text and provides multi-model support.",
+			id: "ArtGAN/Video-Diffusion-WebUI",
+		},
+	],
+	summary:
+		"Text-to-video models can be used in any application that requires generating consistent sequence of images from text. ",
+	widgetModels: [],
+	youtubeId: undefined,
+};
+
+export default taskData;
diff --git a/packages/tasks/src/token-classification/about.md b/packages/tasks/src/token-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b0701385b5793f32bdafd890c476a4efb99b509
--- /dev/null
+++ b/packages/tasks/src/token-classification/about.md
@@ -0,0 +1,76 @@
+## Use Cases
+
+### Information Extraction from Invoices
+
+You can extract entities of interest from invoices automatically using Named Entity Recognition (NER) models. Invoices can be read with Optical Character Recognition models and the output can be used to do inference with NER models. In this way, important information such as date, company name, and other named entities can be extracted.
+
+## Task Variants
+
+### Named Entity Recognition (NER)
+
+NER is the task of recognizing named entities in a text. These entities can be the names of people, locations, or organizations. The task is formulated as labeling each token with a class for each named entity and a class named "O" for tokens that contain no entity. The input for this task is text and the output is the annotated text with named entities.
+
+#### Inference
+
+You can use the 🤗 Transformers library `ner` pipeline to infer with NER models.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("ner")
+classifier("Hello I'm Omar and I live in Zürich.")
+```
+
+### Part-of-Speech (PoS) Tagging
+
+In PoS tagging, the model recognizes parts of speech, such as nouns, pronouns, adjectives, or verbs, in a given text. The task is formulated as labeling each word with a part of speech.
+
+#### Inference
+
+You can use the 🤗 Transformers library `token-classification` pipeline with a PoS tagging model of your choice. The model will return a JSON object with a PoS tag for each token.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("token-classification", model = "vblagoje/bert-english-uncased-finetuned-pos")
+classifier("Hello I'm Omar and I live in Zürich.")
+```
+
+This is not limited to transformers! You can also use other libraries such as Stanza, spaCy, and Flair to do inference! Here is an example using a canonical [spaCy](https://hf.co/blog/spacy) model.
+
+```python
+!pip install https://huggingface.co/spacy/en_core_web_sm/resolve/main/en_core_web_sm-any-py3-none-any.whl
+
+import en_core_web_sm
+
+nlp = en_core_web_sm.load()
+doc = nlp("I'm Omar and I live in Zürich.")
+for token in doc:
+    print(token.text, token.pos_, token.dep_, token.ent_type_)
+
+## I PRON nsubj
+## 'm AUX ROOT
+## Omar PROPN attr PERSON
+## ...
+```
+
+## Useful Resources
+
+Would you like to learn more about token classification? Great! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Token Classification](https://huggingface.co/course/chapter7/2?fw=pt)
+- [Blog post: Welcome spaCy to the Hugging Face Hub](https://huggingface.co/blog/spacy)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification)
+
+### Documentation
+
+- [Token classification task guide](https://huggingface.co/docs/transformers/tasks/token_classification)
diff --git a/packages/tasks/src/token-classification/data.ts b/packages/tasks/src/token-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..cb3e211df92f7366d1209c194c0ec481cd0380c1
--- /dev/null
+++ b/packages/tasks/src/token-classification/data.ts
@@ -0,0 +1,84 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A widely used dataset useful to benchmark named entity recognition models.",
+			id: "conll2003",
+		},
+		{
+			description:
+				"A multilingual dataset of Wikipedia articles annotated for named entity recognition in over 150 different languages.",
+			id: "wikiann",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "My name is Omar and I live in Zürich.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				text: "My name is Omar and I live in Zürich.",
+				tokens: [
+					{
+						type: "PERSON",
+						start: 11,
+						end: 15,
+					},
+					{
+						type: "GPE",
+						start: 30,
+						end: 36,
+					},
+				],
+				type: "text-with-tokens",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "Accuracy is the proportion of correct predictions among the total number of cases processed.",
+			id: "accuracy",
+		},
+		{
+			description:
+				"Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed as: recall = TP / (TP + FN).",
+			id: "recall",
+		},
+		{
+			description:
+				"Precision is the fraction of correctly labeled positive examples out of all of the examples that were labeled as positive. It is computed via: precision = TP / (TP + FP).",
+			id: "precision",
+		},
+		{
+			description:
+				"The F1 metric is the harmonic mean of the precision and recall. It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall).",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description:
+				"A robust performance model to identify people, locations, organizations and names of miscellaneous entities.",
+			id: "dslim/bert-base-NER",
+		},
+		{
+			description: "Flair models are typically the state of the art in named entity recognition tasks.",
+			id: "flair/ner-english",
+		},
+	],
+	spaces: [
+		{
+			description:
+				"An application that can recognizes entities, extracts noun chunks and recognizes various linguistic features of each token.",
+			id: "spacy/gradio_pipeline_visualizer",
+		},
+	],
+	summary:
+		"Token classification is a natural language understanding task in which a label is assigned to some tokens in a text. Some popular token classification subtasks are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models could be trained to identify specific entities in a text, such as dates, individuals and places; and PoS tagging would identify, for example, which words in a text are verbs, nouns, and punctuation marks.",
+	widgetModels: ["dslim/bert-base-NER"],
+	youtubeId: "wVHdVlPScxA",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/translation/about.md b/packages/tasks/src/translation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..39755a8db4de8934ee7ec3881b942346e02100c5
--- /dev/null
+++ b/packages/tasks/src/translation/about.md
@@ -0,0 +1,65 @@
+## Use Cases
+
+You can find over a thousand Translation models on the Hub, but sometimes you might not find a model for the language pair you are interested in. When this happens, you can use a pretrained multilingual Translation model like [mBART](https://huggingface.co/facebook/mbart-large-cc25) and further train it on your own data in a process called fine-tuning.
+
+### Multilingual conversational agents
+
+Translation models can be used to build conversational agents across different languages. This can be done in two ways.
+
+- **Translate the dataset to a new language.** You can translate a dataset of intents (inputs) and responses to the target language. You can then train a new intent classification model with this new dataset. This allows you to proofread responses in the target language and have better control of the chatbot's outputs.
+
+- **Translate the input and output of the agent.** You can use a Translation model on user inputs so that the chatbot can process them. You can then translate the output of the chatbot into the language of the user. This approach might be less reliable, as the chatbot will generate responses that were not defined beforehand.
+
+## Inference
+
+You can use the 🤗 Transformers library with the `translation_xx_to_yy` pattern, where xx is the source language code and yy is the target language code. The default model for the pipeline is [t5-base](https://huggingface.co/t5-base), which under the hood adds a task prefix indicating the task itself, e.g. “translate English to French: ”.
+
+```python
+from transformers import pipeline
+en_fr_translator = pipeline("translation_en_to_fr")
+en_fr_translator("How old are you?")
+## [{'translation_text': ' quel âge êtes-vous?'}]
+```
+
+If you’d like to use a specific model checkpoint that is from one specific language to another, you can also directly use the `translation` pipeline.
+
+```python
+from transformers import pipeline
+
+model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
+translator = pipeline("translation", model=model_checkpoint)
+translator("How are you?")
+# [{'translation_text': 'Comment allez-vous ?'}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer translation models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.translation({
+	model: "t5-base",
+	inputs: "My name is Wolfgang and I live in Berlin",
+});
+```
+
+## Useful Resources
+
+Would you like to learn more about Translation? Great! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Translation](https://huggingface.co/course/chapter7/4?fw=pt)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/translation-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/translation)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/translation)
+
+### Documentation
+
+- [Translation task guide](https://huggingface.co/docs/transformers/tasks/translation)
diff --git a/packages/tasks/src/translation/data.ts b/packages/tasks/src/translation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7973a77bd0142c603b5147dcdbf4f97501f8c9fa
--- /dev/null
+++ b/packages/tasks/src/translation/data.ts
@@ -0,0 +1,68 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A dataset of copyright-free books translated into 16 different languages.",
+			id: "opus_books",
+		},
+		{
+			description:
+				"An example of translation between programming languages. This dataset consists of functions in Java and C#.",
+			id: "code_x_glue_cc_code_to_code_trans",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "My name is Omar and I live in Zürich.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Output",
+				content: "Mein Name ist Omar und ich wohne in Zürich.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called “n-grams”. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. The score ranges from 0 to 1, where 1 means the translation perfectly matched and 0 did not match at all",
+			id: "bleu",
+		},
+		{
+			description: "",
+			id: "sacrebleu",
+		},
+	],
+	models: [
+		{
+			description: "A model that translates from English to French.",
+			id: "Helsinki-NLP/opus-mt-en-fr",
+		},
+		{
+			description:
+				"A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.",
+			id: "t5-base",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can translate between 100 languages.",
+			id: "Iker/Translate-100-languages",
+		},
+		{
+			description: "An application that can translate between English, Spanish and Hindi.",
+			id: "EuroPython2022/Translate-with-Bloom",
+		},
+	],
+	summary: "Translation is the task of converting text from one language to another.",
+	widgetModels: ["t5-small"],
+	youtubeId: "1JvfrvZgi6c",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/unconditional-image-generation/about.md b/packages/tasks/src/unconditional-image-generation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..e5a9585528ae0afd0bc779d0d8628ceca167376e
--- /dev/null
+++ b/packages/tasks/src/unconditional-image-generation/about.md
@@ -0,0 +1,50 @@
+## About the Task
+
+Unconditional image generation is the task of generating new images without any specific input. The main goal is to create novel, original images that are not based on existing images.
+This can be used for a variety of applications, such as creating new artistic images, improving image recognition algorithms, or generating photorealistic images for virtual reality environments.
+
+Unconditional image generation models usually start with a _seed_ that is used to generate a _random noise vector_. The model then uses this vector to create an output image similar to the images used for training the model.
+
+An example of unconditional image generation would be generating the image of a face using a model trained on the [CelebA dataset](https://huggingface.co/datasets/huggan/CelebA-HQ), or [generating a butterfly](https://huggingface.co/spaces/huggan/butterfly-gan) with a model trained on the [Smithsonian Butterflies dataset](https://huggingface.co/datasets/ceyda/smithsonian_butterflies).
+
+[Generative adversarial networks](https://en.wikipedia.org/wiki/Generative_adversarial_network) and [diffusion models](https://huggingface.co/docs/diffusers/index) are common architectures for this task.
+
+## Use Cases
+
+Unconditional image generation can be used for a variety of applications.
+
+### Artistic Expression
+
+Unconditional image generation can be used to create novel, original artwork that is not based on any existing images. This can be used to explore new creative possibilities and produce unique, imaginative images.
+
+### Data Augmentation
+
+Unconditional image generation models can be used to generate new images to improve the performance of image recognition algorithms. This makes algorithms more robust and able to handle a broader range of images.
+
+### Virtual Reality
+
+Unconditional image generation models can be used to create photorealistic images that can be used in virtual reality environments. This makes the VR experience more immersive and realistic.
+
+### Medical Imaging
+
+Unconditional image generation models can generate new medical images, such as CT or MRI scans, that can be used to train and evaluate medical imaging algorithms. This can improve the accuracy and reliability of these algorithms.
+
+### Industrial Design
+
+Unconditional image generation models can generate new designs for products, such as clothing or furniture, that are not based on any existing designs. This way, designers can explore new creative possibilities and produce unique, innovative designs.
+
+## Model Hosting and Inference
+
+This section should have useful information about Model Hosting and Inference
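+
+As a starting point, here is a minimal sketch using the 🧨 Diffusers library with the `google/ddpm-celebahq-256` checkpoint listed on this page; fixing the seed is an illustrative choice for reproducibility.
+
+```python
+import torch
+from diffusers import DDPMPipeline
+
+# Load a pre-trained unconditional diffusion model.
+pipeline = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256")
+
+# Fix the seed that produces the initial noise vector, so runs are reproducible.
+generator = torch.Generator().manual_seed(42)
+image = pipeline(generator=generator).images[0]
+image.save("generated_face.png")
+```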
+
+## Useful Resources
+
+- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class)
+- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index)
+- [Unconditional Image Generation Training](https://huggingface.co/docs/diffusers/training/unconditional_training)
+
+### Training your own model in just a few seconds
+
+In this area, you can insert useful information about training the model
+
+This page was made possible thanks to the efforts of [Someet Sahoo](https://huggingface.co/Someet24) and [Juan Carlos Piñeros](https://huggingface.co/juancopi81).
diff --git a/packages/tasks/src/unconditional-image-generation/data.ts b/packages/tasks/src/unconditional-image-generation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f9eeac7e45900294fa268234f92a8f7b7f5ff494
--- /dev/null
+++ b/packages/tasks/src/unconditional-image-generation/data.ts
@@ -0,0 +1,72 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.",
+			id: "cifar100",
+		},
+		{
+			description: "Multiple images of celebrities, used for facial expression translation.",
+			id: "CelebA",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Seed",
+				content: "42",
+				type: "text",
+			},
+			{
+				label: "Number of images to generate:",
+				content: "4",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "unconditional-image-generation-output.jpeg",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The inception score (IS) evaluates the quality of generated images. It measures the diversity of the generated images (the model predictions are evenly distributed across all possible labels) and their 'distinction' or 'sharpness' (the model confidently predicts a single label for each image).",
+			id: "Inception score (IS)",
+		},
+		{
+			description:
+				"The Fréchet Inception Distance (FID) evaluates the quality of images created by a generative model by calculating the distance between feature vectors for real and generated images.",
+			id: "Frećhet Inception Distance (FID)",
+		},
+	],
+	models: [
+		{
+			description:
+				"High-quality image generation model trained on the CIFAR-10 dataset. It synthesizes images of the ten classes presented in the dataset using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.",
+			id: "google/ddpm-cifar10-32",
+		},
+		{
+			description:
+				"High-quality image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes images of faces using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.",
+			id: "google/ddpm-celebahq-256",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can generate realistic faces.",
+			id: "CompVis/celeba-latent-diffusion",
+		},
+	],
+	summary:
+		"Unconditional image generation is the task of generating images with no condition in any context (like a prompt text or another image). Once trained, the model will create images that resemble its training data distribution.",
+	widgetModels: [""],
+	// TODO: Add related video
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/video-classification/about.md b/packages/tasks/src/video-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc87585af4f9eb3092e169cb2a61e32772a5034c
--- /dev/null
+++ b/packages/tasks/src/video-classification/about.md
@@ -0,0 +1,57 @@
+## Use Cases
+
+Video classification models can be used to categorize what a video is all about.
+
+### Activity Recognition
+
+Video classification models are used to perform activity recognition, which is useful for fitness applications. Activity recognition is also helpful for vision-impaired individuals, especially when they're commuting.
+
+### Video Search
+
+Models trained in video classification can improve user experience by organizing and categorizing video galleries on the phone or in the cloud, based on multiple keywords or tags.
+
+## Inference
+
+Below you can find code for inferring with a pre-trained video classification model.
+
+```python
+import torch
+from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification
+from pytorchvideo.transforms import UniformTemporalSubsample
+from pytorchvideo.data.encoded_video import EncodedVideo
+
+# Load the feature extractor and the pre-trained model.
+model_ckpt = "MCG-NJU/videomae-base-finetuned-kinetics"
+feature_extractor = VideoMAEFeatureExtractor.from_pretrained(model_ckpt)
+model = VideoMAEForVideoClassification.from_pretrained(model_ckpt)
+
+# Load the video.
+video = EncodedVideo.from_path("path_to_video.mp4")
+video_data = video.get_clip(start_sec=0, end_sec=4.0)["video"]
+
+# Sub-sample a fixed set of frames and convert them to a NumPy array.
+num_frames = 16
+subsampler = UniformTemporalSubsample(num_frames)
+subsampled_frames = subsampler(video_data)
+video_data_np = subsampled_frames.numpy().transpose(1, 2, 3, 0)
+
+# Preprocess the video frames.
+inputs = feature_extractor(list(video_data_np), return_tensors="pt")
+
+# Run inference
+with torch.no_grad():
+    outputs = model(**inputs)
+    logits = outputs.logits
+
+# The model predicts one of the 400 Kinetics-400 classes.
+predicted_label = logits.argmax(-1).item()
+print(model.config.id2label[predicted_label])
+# `eating spaghetti` (if you chose this video:
+# https://hf.co/datasets/nielsr/video-demo/resolve/main/eating_spaghetti.mp4)
+```
+
+## Useful Resources
+
+- [Developing a simple video classification model](https://keras.io/examples/vision/video_classification)
+- [Video classification with Transformers](https://keras.io/examples/vision/video_transformers)
+- [Building a video archive](https://www.youtube.com/watch?v=_IeS1m8r6SY)
+- [Video classification task guide](https://huggingface.co/docs/transformers/tasks/video_classification)
+
+### Creating your own video classifier in minutes
+
+- [Fine-tuning tutorial notebook (PyTorch)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/video_classification.ipynb)
diff --git a/packages/tasks/src/video-classification/data.ts b/packages/tasks/src/video-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..2f6e4d93551ddea48d5c64b831d23ac79fbe9e5a
--- /dev/null
+++ b/packages/tasks/src/video-classification/data.ts
@@ -0,0 +1,84 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "Benchmark dataset used for video classification with videos that belong to 400 classes.",
+			id: "kinetics400",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "video-classification-input.gif",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Playing Guitar",
+						score: 0.514,
+					},
+					{
+						label: "Playing Tennis",
+						score: 0.193,
+					},
+					{
+						label: "Cooking",
+						score: 0.068,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "Accuracy is the proportion of correct predictions among the total number of cases processed.",
+			id: "accuracy",
+		},
+		{
+			description:
+				"Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed as: recall = TP / (TP + FN).",
+			id: "recall",
+		},
+		{
+			description:
+				"Precision is the fraction of correctly labeled positive examples out of all of the examples that were labeled as positive. It is computed via: precision = TP / (TP + FP).",
+			id: "precision",
+		},
+		{
+			description:
+				"The F1 metric is the harmonic mean of the precision and recall. It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall).",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			// TO DO: write description
+			description: "Strong Video Classification model trained on the Kinects 400 dataset.",
+			id: "MCG-NJU/videomae-base-finetuned-kinetics",
+		},
+		{
+			// TO DO: write description
+			description: "Strong Video Classification model trained on the Kinects 400 dataset.",
+			id: "microsoft/xclip-base-patch32",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that classifies video at different timestamps.",
+			id: "nateraw/lavila",
+		},
+		{
+			description: "An application that classifies video.",
+			id: "fcakyon/video-classification",
+		},
+	],
+	summary:
+		"Video classification is the task of assigning a label or class to an entire video. Videos are expected to have only one class for each video. Video classification models take a video as input and return a prediction about which class the video belongs to.",
+	widgetModels: [],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/visual-question-answering/about.md b/packages/tasks/src/visual-question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f96e1679b8a5b46042f5c6e2eb533e80749160f
--- /dev/null
+++ b/packages/tasks/src/visual-question-answering/about.md
@@ -0,0 +1,48 @@
+## Use Cases
+
+### Aiding the Visually Impaired
+
+VQA models can be used to reduce visual barriers for visually impaired individuals by allowing them to get information about images from the web and the real world.
+
+### Education
+
+VQA models can be used to improve experiences at museums by allowing observers to directly ask questions they are interested in.
+
+### Improved Image Retrieval
+
+Visual question answering models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images.
+
+### Video Search
+
+Specific snippets/timestamps of a video can be retrieved based on search queries. For example, the user can ask "At which part of the video does the guitar appear?" and get a specific timestamp range from the whole video.
+
+## Task Variants
+
+### Video Question Answering
+
+Video Question Answering aims to answer questions asked about the content of a video.
+
+## Inference
+
+You can infer with Visual Question Answering models using the `vqa` (or `visual-question-answering`) pipeline. This pipeline requires [the Python Image Library (PIL)](https://pillow.readthedocs.io/en/stable/) to process images. You can install it with `pip install pillow`.
+
+```python
+from PIL import Image
+from transformers import pipeline
+
+vqa_pipeline = pipeline("visual-question-answering")
+
+image = Image.open("elephant.jpeg")
+question = "Is there an elephant?"
+
+vqa_pipeline(image, question, top_k=1)
+#[{'score': 0.9998154044151306, 'answer': 'yes'}]
+```
+
+## Useful Resources
+
+- [An introduction to Visual Question Answering - AllenAI](https://blog.allenai.org/vanilla-vqa-adcaaaa94336)
+- [Multi Modal Framework (MMF) - Meta Research](https://mmf.sh/docs/getting_started/video_overview/)
+
+The contents of this page are contributed by [Bharat Raghunathan](https://huggingface.co/bharat-raghunathan) and [Jose Londono Botero](https://huggingface.co/jlondonobo).
diff --git a/packages/tasks/src/visual-question-answering/data.ts b/packages/tasks/src/visual-question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..83a7e69496945aea9867eadd25d89af4fb9f79c9
--- /dev/null
+++ b/packages/tasks/src/visual-question-answering/data.ts
@@ -0,0 +1,93 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A widely used dataset containing questions (with answers) about images.",
+			id: "Graphcore/vqa",
+		},
+		{
+			description: "A dataset to benchmark visual reasoning based on text in images.",
+			id: "textvqa",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "elephant.jpeg",
+				type: "img",
+			},
+			{
+				label: "Question",
+				content: "What is in this image?",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "elephant",
+						score: 0.97,
+					},
+					{
+						label: "elephants",
+						score: 0.06,
+					},
+					{
+						label: "animal",
+						score: 0.003,
+					},
+				],
+			},
+		],
+	},
+	isPlaceholder: false,
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description:
+				"Measures how much a predicted answer differs from the ground truth based on the difference in their semantic meaning.",
+			id: "wu-palmer similarity",
+		},
+	],
+	models: [
+		{
+			description: "A visual question answering model trained to convert charts and plots to text.",
+			id: "google/deplot",
+		},
+		{
+			description:
+				"A visual question answering model trained for mathematical reasoning and chart derendering from images.",
+			id: "google/matcha-base ",
+		},
+		{
+			description: "A strong visual question answering that answers questions from book covers.",
+			id: "google/pix2struct-ocrvqa-large",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can answer questions based on images.",
+			id: "nielsr/vilt-vqa",
+		},
+		{
+			description: "An application that can caption images and answer questions about a given image. ",
+			id: "Salesforce/BLIP",
+		},
+		{
+			description: "An application that can caption images and answer questions about a given image. ",
+			id: "vumichien/Img2Prompt",
+		},
+	],
+	summary:
+		"Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions.",
+	widgetModels: ["dandelin/vilt-b32-finetuned-vqa"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/zero-shot-classification/about.md b/packages/tasks/src/zero-shot-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b7ff3c48c931d3355c76aed20b891fe8f57c54b
--- /dev/null
+++ b/packages/tasks/src/zero-shot-classification/about.md
@@ -0,0 +1,40 @@
+## About the Task
+
+Zero-shot classification is the task of predicting a class that wasn't seen by the model during training. This method, which leverages a pre-trained language model, can be thought of as an instance of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg), which generally refers to using a model trained for one task in a different application from the one it was originally trained for. This is particularly useful in situations where the amount of labeled data is small.
+
+In zero-shot classification, we provide the model with a prompt and a sequence of text that describes, in natural language, what we want the model to do, without any examples of the desired task being completed. This differs from single- and few-shot classification, which include one or a few examples of the selected task.
+
+Zero-, single- and few-shot classification appear to be emergent capabilities of large language models, arising at model sizes of roughly 100M+ parameters. A model's effectiveness at zero-, single- and few-shot tasks seems to scale with model size: larger models (models with more trainable parameters or layers) generally do better.
+
+Here is an example of a zero-shot prompt for classifying the sentiment of a sequence of text:
+
+```
+Classify the following input text into one of the following three categories: [positive, negative, neutral]
+
+Input Text: Hugging Face is awesome for making all of these
+state of the art models available!
+Sentiment: positive
+```
+
+A great example of this task, with an off-the-shelf model, is the widget on this page: you can input a sequence of text and candidate labels to the model. This is a _word-level_ example of zero-shot classification; more elaborate and lengthy generations are possible with larger models. Testing these models out and getting a feel for prompt engineering is the best way to learn how to use them.
+
+## Inference
+
+You can use the 🤗 Transformers library's `zero-shot-classification` pipeline to infer with zero-shot text classification models.
+
+```python
+from transformers import pipeline
+
+pipe = pipeline(model="facebook/bart-large-mnli")
+pipe("I have a problem with my iphone that needs to be resolved asap!",
+    candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
+)
+# output
+>>> {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
+```
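+
+The pipeline can also let several labels apply at once, and lets you customize the hypothesis it builds internally for each candidate label. A minimal sketch (the labels and template below are illustrative):
+
+```python
+from transformers import pipeline
+
+pipe = pipeline(model="facebook/bart-large-mnli")
+pipe(
+    "I have a problem with my iphone that needs to be resolved asap!!",
+    candidate_labels=["urgent", "phone", "tablet"],
+    hypothesis_template="This text is about something {}.",  # customize the NLI hypothesis
+    multi_label=True,  # label scores become independent and no longer sum to 1
+)
+```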
+
+## Useful Resources
+
+- [Zero Shot Learning](https://joeddav.github.io/blog/2020/05/29/ZSL.html)
+- [Hugging Face on Transfer Learning](https://huggingface.co/course/en/chapter1/4?fw=pt#transfer-learning)
diff --git a/packages/tasks/src/zero-shot-classification/data.ts b/packages/tasks/src/zero-shot-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c949fd0a197936ad37dcb61850703d40771d84e8
--- /dev/null
+++ b/packages/tasks/src/zero-shot-classification/data.ts
@@ -0,0 +1,66 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A widely used dataset used to benchmark multiple variants of text classification.",
+			id: "glue",
+		},
+		{
+			description:
+				"The Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced collection of 433k sentence pairs annotated with textual entailment information.",
+			id: "MultiNLI",
+		},
+		{
+			description:
+				"FEVER is a publicly available dataset for fact extraction and verification against textual sources.",
+			id: "FEVER",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Text Input",
+				content: "Dune is the best movie ever.",
+				type: "text",
+			},
+			{
+				label: "Candidate Labels",
+				content: "CINEMA, ART, MUSIC",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "CINEMA",
+						score: 0.9,
+					},
+					{
+						label: "ART",
+						score: 0.1,
+					},
+					{
+						label: "MUSIC",
+						score: 0.0,
+					},
+				],
+			},
+		],
+	},
+	metrics: [],
+	models: [
+		{
+			description: "Powerful zero-shot text classification model",
+			id: "facebook/bart-large-mnli",
+		},
+	],
+	spaces: [],
+	summary:
+		"Zero-shot text classification is a task in natural language processing where a model is trained on a set of labeled examples but is then able to classify new examples from previously unseen classes.",
+	widgetModels: ["facebook/bart-large-mnli"],
+};
+
+export default taskData;
diff --git a/packages/tasks/src/zero-shot-image-classification/about.md b/packages/tasks/src/zero-shot-image-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..0c4b283280ac5dadcc270740ad6fcff03f18f5ba
--- /dev/null
+++ b/packages/tasks/src/zero-shot-image-classification/about.md
@@ -0,0 +1,76 @@
+## About the Task
+
+Zero-shot image classification is a computer vision task of classifying images into one of several classes without the model having been explicitly trained on labeled examples of those classes.
+
+Zero-shot image classification works by transferring knowledge learned during the training of one model to classify novel classes that were not present in the training data, making it a variation of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg). For instance, a model trained to differentiate cars from airplanes can be used to classify images of ships.
+
+The data in this learning paradigm consists of:
+
+- Seen data - images and their corresponding labels
+- Unseen data - only labels and no images
+- Auxiliary information - additional information given to the model during training that connects the unseen and seen data. This can be in the form of textual descriptions or word embeddings.
+
+## Use Cases
+
+### Image Retrieval
+
+Zero-shot learning resolves several challenges in image retrieval systems. For example, with the rapid growth of categories on the web, it is challenging to index images based on unseen categories. With zero-shot learning, we can associate unseen categories with images by exploiting attributes that model the relationships between visual features and labels.
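+
+As a minimal sketch of this idea, the `zero-shot-image-classification` pipeline shown in the Inference section below can index images under categories chosen at query time (the file names and labels here are illustrative):
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-large-patch14-336")
+
+image_paths = ["beach.jpeg", "street.jpeg", "forest.jpeg"]
+candidate_labels = ["a beach", "a city street", "a forest"]
+
+# Index each image under its highest-scoring candidate label.
+index = {}
+for path in image_paths:
+    predictions = classifier(path, candidate_labels=candidate_labels)
+    index[path] = predictions[0]["label"]
+```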
+
+### Action Recognition
+
+Action recognition is the task of identifying when a person in an image or video is performing a given action from a set of actions. If all the possible actions are not known beforehand, conventional deep learning models fail. With zero-shot learning, for a given domain with a set of actions, we can create a mapping that connects low-level visual features with a semantic description of auxiliary data to classify unknown classes of actions.
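+
+One simple, illustrative way to approximate zero-shot action recognition with off-the-shelf tooling is to run a zero-shot image classifier over sampled video frames and aggregate the per-frame predictions; the frame sampling and the action labels below are assumptions, not a prescribed recipe:
+
+```python
+from collections import Counter
+
+from transformers import pipeline
+
+classifier = pipeline("zero-shot-image-classification")
+
+# `frames` is assumed to be a list of PIL images sampled from a video.
+def recognize_action(frames, actions=("running", "swimming", "playing guitar")):
+    votes = Counter()
+    for frame in frames:
+        best = classifier(frame, candidate_labels=list(actions))[0]
+        votes[best["label"]] += 1
+    return votes.most_common(1)[0][0]  # majority vote across frames
+```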
+
+## Task Variants
+
+You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/zero-shot-image-classification/about.md).
+
+## Inference
+
+The model can be loaded with the `zero-shot-image-classification` pipeline like so:
+
+```python
+from transformers import pipeline
+
+# More models are available on the Hugging Face Hub.
+model_name = "openai/clip-vit-large-patch14-336"
+classifier = pipeline("zero-shot-image-classification", model=model_name)
+```
+
+You can then use this pipeline to classify images into any set of class names you specify; you can pass any number of candidate labels.
+
+```python
+image_to_classify = "path_to_cat_and_dog_image.jpeg"
+labels_for_classification = ["cat and dog", "lion and cheetah", "rabbit and lion"]
+scores = classifier(image_to_classify, candidate_labels=labels_for_classification)
+```
+
+After inference, the classifier returns a list of dictionaries, stored in the variable `scores` in the snippet above, sorted from highest to lowest score:
+
+```python
+[{'score': 0.9950482249259949, 'label': 'cat and dog'},
+{'score': 0.004863627254962921, 'label': 'rabbit and lion'},
+{'score': 8.816882473183796e-05, 'label': 'lion and cheetah'}]
+```
+
+The first dictionary in the list contains the label with the highest score.
+
+```python
+print(f"The highest score is {scores[0]['score']:.3f} for the label {scores[0]['label']}")
+```
+
+The output from the print statement above would look as follows:
+
+```
+The highest score is 0.995 for the label cat and dog
+```
+
+## Useful Resources
+
+You can contribute useful resources about this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/zero-shot-image-classification/about.md).
+
+Check out the [zero-shot image classification task guide](https://huggingface.co/docs/transformers/tasks/zero_shot_image_classification).
+
+This page was made possible thanks to the efforts of [Shamima Hossain](https://huggingface.co/Shamima), [Haider Zaidi](https://huggingface.co/chefhaider) and [Paarth Bhatnagar](https://huggingface.co/Paarth).
diff --git a/packages/tasks/src/zero-shot-image-classification/data.ts b/packages/tasks/src/zero-shot-image-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..be8da73d4c94f7a4b715fb0f8f2ec2851869cb49
--- /dev/null
+++ b/packages/tasks/src/zero-shot-image-classification/data.ts
@@ -0,0 +1,77 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "",
+			id: "",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-classification-input.jpeg",
+				type: "img",
+			},
+			{
+				label: "Classes",
+				content: "cat, dog, bird",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Cat",
+						score: 0.664,
+					},
+					{
+						label: "Dog",
+						score: 0.329,
+					},
+					{
+						label: "Bird",
+						score: 0.008,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "Computes the number of times the correct label appears in top K labels predicted",
+			id: "top-K accuracy",
+		},
+	],
+	models: [
+		{
+			description: "Robust image classification model trained on publicly available image-caption data.",
+			id: "openai/clip-vit-base-patch16",
+		},
+		{
+			description:
+				"Robust image classification model trained on publicly available image-caption data, further trained at a higher image resolution for better performance.",
+			id: "openai/clip-vit-large-patch14-336",
+		},
+		{
+			description: "Strong image classification model for biomedical domain.",
+			id: "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
+		},
+	],
+	spaces: [
+		{
+			description:
+				"An application that leverages zero-shot image classification to find the best captions to generate an image.",
+			id: "pharma/CLIP-Interrogator",
+		},
+	],
+	summary:
+		"Zero shot image classification is the task of classifying previously unseen classes during training of a model.",
+	widgetModels: ["openai/clip-vit-large-patch14-336"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/tsconfig.json b/packages/tasks/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..37823efde76049a185b3c599a3d9709fe765af1d
--- /dev/null
+++ b/packages/tasks/tsconfig.json
@@ -0,0 +1,18 @@
+{
+	"compilerOptions": {
+		"allowSyntheticDefaultImports": true,
+		"lib": ["ES2022", "DOM"],
+		"module": "CommonJS",
+		"moduleResolution": "node",
+		"target": "ES2022",
+		"forceConsistentCasingInFileNames": true,
+		"strict": true,
+		"noImplicitAny": true,
+		"strictNullChecks": true,
+		"skipLibCheck": true,
+		"noImplicitOverride": true,
+		"outDir": "./dist"
+	},
+	"include": ["src"],
+	"exclude": ["dist"]
+}
diff --git a/packages/widgets/.eslintignore b/packages/widgets/.eslintignore
new file mode 100644
index 0000000000000000000000000000000000000000..03cc13658b964ab78efc70b29922fced12af0f9f
--- /dev/null
+++ b/packages/widgets/.eslintignore
@@ -0,0 +1,14 @@
+.DS_Store
+node_modules
+/dist
+/build
+/.svelte-kit
+/package
+.env
+.env.*
+!.env.example
+
+# Ignore files for PNPM, NPM and YARN
+pnpm-lock.yaml
+package-lock.json
+yarn.lock
diff --git a/packages/widgets/.gitignore b/packages/widgets/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ac7211b4033ca0cd15c21967204484e016ea6739
--- /dev/null
+++ b/packages/widgets/.gitignore
@@ -0,0 +1,11 @@
+.DS_Store
+node_modules
+/build
+/dist
+/.svelte-kit
+/package
+.env
+.env.*
+!.env.example
+vite.config.js.timestamp-*
+vite.config.ts.timestamp-*
diff --git a/packages/widgets/.prettierignore b/packages/widgets/.prettierignore
new file mode 100644
index 0000000000000000000000000000000000000000..03cc13658b964ab78efc70b29922fced12af0f9f
--- /dev/null
+++ b/packages/widgets/.prettierignore
@@ -0,0 +1,14 @@
+.DS_Store
+node_modules
+/dist
+/build
+/.svelte-kit
+/package
+.env
+.env.*
+!.env.example
+
+# Ignore files for PNPM, NPM and YARN
+pnpm-lock.yaml
+package-lock.json
+yarn.lock
diff --git a/packages/widgets/README.md b/packages/widgets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..64d097d3ae688b6d4aaf833be3df5936100cd641
--- /dev/null
+++ b/packages/widgets/README.md
@@ -0,0 +1,18 @@
+# Hugging Face Widgets
+
+Open-source version of the inference widgets from huggingface.co.
+
+> Built with Svelte and SvelteKit
+
+**Demo page:** https://huggingface.co/spaces/huggingfacejs/inference-widgets
+
+You can also run the demo locally:
+
+```console
+pnpm install
+pnpm dev
+```
+
+## Publishing
+
+Because `@huggingface/widgets` depends on `@huggingface/tasks`, you need to publish `@huggingface/tasks` first, and then `@huggingface/widgets`.
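+
+For example, from the repository root (a sketch, assuming you publish with pnpm; adjust to your actual release process):
+
+```console
+pnpm --filter @huggingface/tasks publish
+pnpm --filter @huggingface/widgets publish
+```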
diff --git a/packages/widgets/package.json b/packages/widgets/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..b8e394020e990df805b798ac8579ddaab30b5ef2
--- /dev/null
+++ b/packages/widgets/package.json
@@ -0,0 +1,59 @@
+{
+	"name": "@huggingface/widgets",
+	"packageManager": "pnpm@8.10.5",
+	"version": "0.0.4",
+	"publishConfig": {
+		"access": "public"
+	},
+	"scripts": {
+		"dev": "vite dev",
+		"build": "vite build && npm run package",
+		"preview": "vite preview",
+		"package": "svelte-kit sync && svelte-package && publint",
+		"prepublishOnly": "npm run package",
+		"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
+		"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
+		"lint": "eslint --quiet --fix --ext .cjs,.ts .",
+		"lint:check": "eslint --ext .cjs,.ts .",
+		"format": "prettier --write .",
+		"format:check": "prettier --check ."
+	},
+	"exports": {
+		".": {
+			"types": "./dist/index.d.ts",
+			"svelte": "./dist/index.js"
+		}
+	},
+	"files": [
+		"dist",
+		"src",
+		"!dist/**/*.test.*",
+		"!dist/**/*.spec.*",
+		"static/audioProcessor.js"
+	],
+	"dependencies": {
+		"@huggingface/tasks": "workspace:^"
+	},
+	"peerDependencies": {
+		"svelte": "^3.59.2"
+	},
+	"devDependencies": {
+		"@sveltejs/adapter-auto": "^2.0.0",
+		"@sveltejs/kit": "^1.27.4",
+		"@sveltejs/package": "^2.0.0",
+		"@tailwindcss/forms": "^0.5.7",
+		"autoprefixer": "^10.4.16",
+		"eslint": "^8.28.0",
+		"postcss": "^8.4.31",
+		"publint": "^0.1.9",
+		"svelte": "^3.59.2",
+		"svelte-check": "^3.6.0",
+		"tailwindcss": "^3.3.5",
+		"tslib": "^2.4.1",
+		"typescript": "^5.0.0",
+		"vite": "^4.0.0"
+	},
+	"svelte": "./dist/index.js",
+	"types": "./dist/index.d.ts",
+	"type": "module"
+}
diff --git a/packages/widgets/pnpm-lock.yaml b/packages/widgets/pnpm-lock.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4703ba0b3d6ea38e79755934691d35e664890b38
--- /dev/null
+++ b/packages/widgets/pnpm-lock.yaml
@@ -0,0 +1,1973 @@
+lockfileVersion: '6.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+dependencies:
+  '@huggingface/tasks':
+    specifier: workspace:^
+    version: link:../tasks
+
+devDependencies:
+  '@sveltejs/adapter-auto':
+    specifier: ^2.0.0
+    version: 2.0.0(@sveltejs/kit@1.27.4)
+  '@sveltejs/kit':
+    specifier: ^1.27.4
+    version: 1.27.4(svelte@3.59.2)(vite@4.5.0)
+  '@sveltejs/package':
+    specifier: ^2.0.0
+    version: 2.0.0(svelte@3.59.2)(typescript@5.0.4)
+  '@tailwindcss/forms':
+    specifier: ^0.5.7
+    version: 0.5.7(tailwindcss@3.3.5)
+  autoprefixer:
+    specifier: ^10.4.16
+    version: 10.4.16(postcss@8.4.31)
+  eslint:
+    specifier: ^8.28.0
+    version: 8.28.0
+  postcss:
+    specifier: ^8.4.31
+    version: 8.4.31
+  publint:
+    specifier: ^0.1.9
+    version: 0.1.9
+  svelte:
+    specifier: ^3.59.2
+    version: 3.59.2
+  svelte-check:
+    specifier: ^3.6.0
+    version: 3.6.0(postcss@8.4.31)(svelte@3.59.2)
+  tailwindcss:
+    specifier: ^3.3.5
+    version: 3.3.5
+  tslib:
+    specifier: ^2.4.1
+    version: 2.4.1
+  typescript:
+    specifier: ^5.0.0
+    version: 5.0.4
+  vite:
+    specifier: ^4.0.0
+    version: 4.5.0
+
+packages:
+
+  /@aashutoshrathi/word-wrap@1.2.6:
+    resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /@alloc/quick-lru@5.2.0:
+    resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /@esbuild/android-arm64@0.18.20:
+    resolution: {integrity: sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-arm@0.18.20:
+    resolution: {integrity: sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-x64@0.18.20:
+    resolution: {integrity: sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-arm64@0.18.20:
+    resolution: {integrity: sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-x64@0.18.20:
+    resolution: {integrity: sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-arm64@0.18.20:
+    resolution: {integrity: sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-x64@0.18.20:
+    resolution: {integrity: sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm64@0.18.20:
+    resolution: {integrity: sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm@0.18.20:
+    resolution: {integrity: sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ia32@0.18.20:
+    resolution: {integrity: sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-loong64@0.18.20:
+    resolution: {integrity: sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==}
+    engines: {node: '>=12'}
+    cpu: [loong64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-mips64el@0.18.20:
+    resolution: {integrity: sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==}
+    engines: {node: '>=12'}
+    cpu: [mips64el]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ppc64@0.18.20:
+    resolution: {integrity: sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-riscv64@0.18.20:
+    resolution: {integrity: sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==}
+    engines: {node: '>=12'}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-s390x@0.18.20:
+    resolution: {integrity: sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==}
+    engines: {node: '>=12'}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-x64@0.18.20:
+    resolution: {integrity: sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/netbsd-x64@0.18.20:
+    resolution: {integrity: sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [netbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/openbsd-x64@0.18.20:
+    resolution: {integrity: sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [openbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/sunos-x64@0.18.20:
+    resolution: {integrity: sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [sunos]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-arm64@0.18.20:
+    resolution: {integrity: sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-ia32@0.18.20:
+    resolution: {integrity: sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-x64@0.18.20:
+    resolution: {integrity: sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@eslint/eslintrc@1.4.1:
+    resolution: {integrity: sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      ajv: 6.12.6
+      debug: 4.3.4
+      espree: 9.6.1
+      globals: 13.23.0
+      ignore: 5.3.0
+      import-fresh: 3.3.0
+      js-yaml: 4.1.0
+      minimatch: 3.1.2
+      strip-json-comments: 3.1.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@fastify/busboy@2.1.0:
+    resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /@humanwhocodes/config-array@0.11.13:
+    resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==}
+    engines: {node: '>=10.10.0'}
+    dependencies:
+      '@humanwhocodes/object-schema': 2.0.1
+      debug: 4.3.4
+      minimatch: 3.1.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@humanwhocodes/module-importer@1.0.1:
+    resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
+    engines: {node: '>=12.22'}
+    dev: true
+
+  /@humanwhocodes/object-schema@2.0.1:
+    resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==}
+    dev: true
+
+  /@jridgewell/gen-mapping@0.3.3:
+    resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      '@jridgewell/set-array': 1.1.2
+      '@jridgewell/sourcemap-codec': 1.4.15
+      '@jridgewell/trace-mapping': 0.3.20
+    dev: true
+
+  /@jridgewell/resolve-uri@3.1.1:
+    resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==}
+    engines: {node: '>=6.0.0'}
+    dev: true
+
+  /@jridgewell/set-array@1.1.2:
+    resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==}
+    engines: {node: '>=6.0.0'}
+    dev: true
+
+  /@jridgewell/sourcemap-codec@1.4.15:
+    resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==}
+    dev: true
+
+  /@jridgewell/trace-mapping@0.3.20:
+    resolution: {integrity: sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==}
+    dependencies:
+      '@jridgewell/resolve-uri': 3.1.1
+      '@jridgewell/sourcemap-codec': 1.4.15
+    dev: true
+
+  /@nodelib/fs.scandir@2.1.5:
+    resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      run-parallel: 1.2.0
+    dev: true
+
+  /@nodelib/fs.stat@2.0.5:
+    resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /@nodelib/fs.walk@1.2.8:
+    resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.scandir': 2.1.5
+      fastq: 1.15.0
+    dev: true
+
+  /@polka/url@1.0.0-next.23:
+    resolution: {integrity: sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==}
+    dev: true
+
+  /@sveltejs/adapter-auto@2.0.0(@sveltejs/kit@1.27.4):
+    resolution: {integrity: sha512-b+gkHFZgD771kgV3aO4avHFd7y1zhmMYy9i6xOK7m/rwmwaRO8gnF5zBc0Rgca80B2PMU1bKNxyBTHA14OzUAQ==}
+    peerDependencies:
+      '@sveltejs/kit': ^1.0.0
+    dependencies:
+      '@sveltejs/kit': 1.27.4(svelte@3.59.2)(vite@4.5.0)
+      import-meta-resolve: 2.2.2
+    dev: true
+
+  /@sveltejs/kit@1.27.4(svelte@3.59.2)(vite@4.5.0):
+    resolution: {integrity: sha512-Vxl8Jf0C1+/8i/slsxFOnwJntCBDLueO/O6GJ0390KUnyW3Zs+4ZiIinD+cEcYnJPQQ9CRzVSr9Bn6DbmTn4Dw==}
+    engines: {node: ^16.14 || >=18}
+    hasBin: true
+    requiresBuild: true
+    peerDependencies:
+      svelte: ^3.54.0 || ^4.0.0-next.0 || ^5.0.0-next.0
+      vite: ^4.0.0
+    dependencies:
+      '@sveltejs/vite-plugin-svelte': 2.5.2(svelte@3.59.2)(vite@4.5.0)
+      '@types/cookie': 0.5.4
+      cookie: 0.5.0
+      devalue: 4.3.2
+      esm-env: 1.0.0
+      kleur: 4.1.5
+      magic-string: 0.30.5
+      mrmime: 1.0.1
+      sade: 1.8.1
+      set-cookie-parser: 2.6.0
+      sirv: 2.0.3
+      svelte: 3.59.2
+      tiny-glob: 0.2.9
+      undici: 5.26.5
+      vite: 4.5.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@sveltejs/package@2.0.0(svelte@3.59.2)(typescript@5.0.4):
+    resolution: {integrity: sha512-sANz/dJibOHOe83hl8pFWUSypqefdYwPp6SUr0SmJxTNQFB5dDECEqwAwoy28DWCQFYl7DU+C1hKkTXyuKOdug==}
+    engines: {node: ^16.14 || >=18}
+    hasBin: true
+    peerDependencies:
+      svelte: ^3.44.0
+    dependencies:
+      chokidar: 3.5.3
+      kleur: 4.1.5
+      sade: 1.8.1
+      svelte: 3.59.2
+      svelte2tsx: 0.6.25(svelte@3.59.2)(typescript@5.0.4)
+    transitivePeerDependencies:
+      - typescript
+    dev: true
+
+  /@sveltejs/vite-plugin-svelte-inspector@1.0.4(@sveltejs/vite-plugin-svelte@2.5.2)(svelte@3.59.2)(vite@4.5.0):
+    resolution: {integrity: sha512-zjiuZ3yydBtwpF3bj0kQNV0YXe+iKE545QGZVTaylW3eAzFr+pJ/cwK8lZEaRp4JtaJXhD5DyWAV4AxLh6DgaQ==}
+    engines: {node: ^14.18.0 || >= 16}
+    peerDependencies:
+      '@sveltejs/vite-plugin-svelte': ^2.2.0
+      svelte: ^3.54.0 || ^4.0.0
+      vite: ^4.0.0
+    dependencies:
+      '@sveltejs/vite-plugin-svelte': 2.5.2(svelte@3.59.2)(vite@4.5.0)
+      debug: 4.3.4
+      svelte: 3.59.2
+      vite: 4.5.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@sveltejs/vite-plugin-svelte@2.5.2(svelte@3.59.2)(vite@4.5.0):
+    resolution: {integrity: sha512-Dfy0Rbl+IctOVfJvWGxrX/3m6vxPLH8o0x+8FA5QEyMUQMo4kGOVIojjryU7YomBAexOTAuYf1RT7809yDziaA==}
+    engines: {node: ^14.18.0 || >= 16}
+    peerDependencies:
+      svelte: ^3.54.0 || ^4.0.0 || ^5.0.0-next.0
+      vite: ^4.0.0
+    dependencies:
+      '@sveltejs/vite-plugin-svelte-inspector': 1.0.4(@sveltejs/vite-plugin-svelte@2.5.2)(svelte@3.59.2)(vite@4.5.0)
+      debug: 4.3.4
+      deepmerge: 4.3.1
+      kleur: 4.1.5
+      magic-string: 0.30.5
+      svelte: 3.59.2
+      svelte-hmr: 0.15.3(svelte@3.59.2)
+      vite: 4.5.0
+      vitefu: 0.2.5(vite@4.5.0)
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@tailwindcss/forms@0.5.7(tailwindcss@3.3.5):
+    resolution: {integrity: sha512-QE7X69iQI+ZXwldE+rzasvbJiyV/ju1FGHH0Qn2W3FKbuYtqp8LKcy6iSw79fVUT5/Vvf+0XgLCeYVG+UV6hOw==}
+    peerDependencies:
+      tailwindcss: '>=3.0.0 || >= 3.0.0-alpha.1'
+    dependencies:
+      mini-svg-data-uri: 1.4.4
+      tailwindcss: 3.3.5
+    dev: true
+
+  /@types/cookie@0.5.4:
+    resolution: {integrity: sha512-7z/eR6O859gyWIAjuvBWFzNURmf2oPBmJlfVWkwehU5nzIyjwBsTh7WMmEEV4JFnHuQ3ex4oyTvfKzcyJVDBNA==}
+    dev: true
+
+  /@types/pug@2.0.9:
+    resolution: {integrity: sha512-Yg4LkgFYvn1faISbDNWmcAC1XoDT8IoMUFspp5mnagKk+UvD2N0IWt5A7GRdMubsNWqgCLmrkf8rXkzNqb4szA==}
+    dev: true
+
+  /acorn-jsx@5.3.2(acorn@8.11.2):
+    resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
+    peerDependencies:
+      acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+    dependencies:
+      acorn: 8.11.2
+    dev: true
+
+  /acorn@8.11.2:
+    resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==}
+    engines: {node: '>=0.4.0'}
+    hasBin: true
+    dev: true
+
+  /ajv@6.12.6:
+    resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
+    dependencies:
+      fast-deep-equal: 3.1.3
+      fast-json-stable-stringify: 2.1.0
+      json-schema-traverse: 0.4.1
+      uri-js: 4.4.1
+    dev: true
+
+  /ansi-regex@5.0.1:
+    resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /ansi-styles@4.3.0:
+    resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
+    engines: {node: '>=8'}
+    dependencies:
+      color-convert: 2.0.1
+    dev: true
+
+  /any-promise@1.3.0:
+    resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
+    dev: true
+
+  /anymatch@3.1.3:
+    resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
+    engines: {node: '>= 8'}
+    dependencies:
+      normalize-path: 3.0.0
+      picomatch: 2.3.1
+    dev: true
+
+  /arg@5.0.2:
+    resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==}
+    dev: true
+
+  /argparse@2.0.1:
+    resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
+    dev: true
+
+  /autoprefixer@10.4.16(postcss@8.4.31):
+    resolution: {integrity: sha512-7vd3UC6xKp0HLfua5IjZlcXvGAGy7cBAXTg2lyQ/8WpNhd6SiZ8Be+xm3FyBSYJx5GKcpRCzBh7RH4/0dnY+uQ==}
+    engines: {node: ^10 || ^12 || >=14}
+    hasBin: true
+    peerDependencies:
+      postcss: ^8.1.0
+    dependencies:
+      browserslist: 4.22.1
+      caniuse-lite: 1.0.30001563
+      fraction.js: 4.3.7
+      normalize-range: 0.1.2
+      picocolors: 1.0.0
+      postcss: 8.4.31
+      postcss-value-parser: 4.2.0
+    dev: true
+
+  /balanced-match@1.0.2:
+    resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
+    dev: true
+
+  /binary-extensions@2.2.0:
+    resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /brace-expansion@1.1.11:
+    resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==}
+    dependencies:
+      balanced-match: 1.0.2
+      concat-map: 0.0.1
+    dev: true
+
+  /brace-expansion@2.0.1:
+    resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==}
+    dependencies:
+      balanced-match: 1.0.2
+    dev: true
+
+  /braces@3.0.2:
+    resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
+    engines: {node: '>=8'}
+    dependencies:
+      fill-range: 7.0.1
+    dev: true
+
+  /browserslist@4.22.1:
+    resolution: {integrity: sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==}
+    engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
+    hasBin: true
+    dependencies:
+      caniuse-lite: 1.0.30001563
+      electron-to-chromium: 1.4.588
+      node-releases: 2.0.13
+      update-browserslist-db: 1.0.13(browserslist@4.22.1)
+    dev: true
+
+  /buffer-crc32@0.2.13:
+    resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==}
+    dev: true
+
+  /callsites@3.1.0:
+    resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /camelcase-css@2.0.1:
+    resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /caniuse-lite@1.0.30001563:
+    resolution: {integrity: sha512-na2WUmOxnwIZtwnFI2CZ/3er0wdNzU7hN+cPYz/z2ajHThnkWjNBOpEPP4n+4r2WPM847JaMotaJE3bnfzjyKw==}
+    dev: true
+
+  /chalk@4.1.2:
+    resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
+    engines: {node: '>=10'}
+    dependencies:
+      ansi-styles: 4.3.0
+      supports-color: 7.2.0
+    dev: true
+
+  /chokidar@3.5.3:
+    resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==}
+    engines: {node: '>= 8.10.0'}
+    dependencies:
+      anymatch: 3.1.3
+      braces: 3.0.2
+      glob-parent: 5.1.2
+      is-binary-path: 2.1.0
+      is-glob: 4.0.3
+      normalize-path: 3.0.0
+      readdirp: 3.6.0
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /color-convert@2.0.1:
+    resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
+    engines: {node: '>=7.0.0'}
+    dependencies:
+      color-name: 1.1.4
+    dev: true
+
+  /color-name@1.1.4:
+    resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
+    dev: true
+
+  /commander@4.1.1:
+    resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /concat-map@0.0.1:
+    resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+    dev: true
+
+  /cookie@0.5.0:
+    resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==}
+    engines: {node: '>= 0.6'}
+    dev: true
+
+  /cross-spawn@7.0.3:
+    resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
+    engines: {node: '>= 8'}
+    dependencies:
+      path-key: 3.1.1
+      shebang-command: 2.0.0
+      which: 2.0.2
+    dev: true
+
+  /cssesc@3.0.0:
+    resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: true
+
+  /debug@4.3.4:
+    resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==}
+    engines: {node: '>=6.0'}
+    peerDependencies:
+      supports-color: '*'
+    peerDependenciesMeta:
+      supports-color:
+        optional: true
+    dependencies:
+      ms: 2.1.2
+    dev: true
+
+  /dedent-js@1.0.1:
+    resolution: {integrity: sha512-OUepMozQULMLUmhxS95Vudo0jb0UchLimi3+pQ2plj61Fcy8axbP9hbiD4Sz6DPqn6XG3kfmziVfQ1rSys5AJQ==}
+    dev: true
+
+  /deep-is@0.1.4:
+    resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==}
+    dev: true
+
+  /deepmerge@4.3.1:
+    resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /detect-indent@6.1.0:
+    resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /devalue@4.3.2:
+    resolution: {integrity: sha512-KqFl6pOgOW+Y6wJgu80rHpo2/3H07vr8ntR9rkkFIRETewbf5GaYYcakYfiKz89K+sLsuPkQIZaXDMjUObZwWg==}
+    dev: true
+
+  /didyoumean@1.2.2:
+    resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==}
+    dev: true
+
+  /dlv@1.1.3:
+    resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==}
+    dev: true
+
+  /doctrine@3.0.0:
+    resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      esutils: 2.0.3
+    dev: true
+
+  /electron-to-chromium@1.4.588:
+    resolution: {integrity: sha512-soytjxwbgcCu7nh5Pf4S2/4wa6UIu+A3p03U2yVr53qGxi1/VTR3ENI+p50v+UxqqZAfl48j3z55ud7VHIOr9w==}
+    dev: true
+
+  /es6-promise@3.3.1:
+    resolution: {integrity: sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==}
+    dev: true
+
+  /esbuild@0.18.20:
+    resolution: {integrity: sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    optionalDependencies:
+      '@esbuild/android-arm': 0.18.20
+      '@esbuild/android-arm64': 0.18.20
+      '@esbuild/android-x64': 0.18.20
+      '@esbuild/darwin-arm64': 0.18.20
+      '@esbuild/darwin-x64': 0.18.20
+      '@esbuild/freebsd-arm64': 0.18.20
+      '@esbuild/freebsd-x64': 0.18.20
+      '@esbuild/linux-arm': 0.18.20
+      '@esbuild/linux-arm64': 0.18.20
+      '@esbuild/linux-ia32': 0.18.20
+      '@esbuild/linux-loong64': 0.18.20
+      '@esbuild/linux-mips64el': 0.18.20
+      '@esbuild/linux-ppc64': 0.18.20
+      '@esbuild/linux-riscv64': 0.18.20
+      '@esbuild/linux-s390x': 0.18.20
+      '@esbuild/linux-x64': 0.18.20
+      '@esbuild/netbsd-x64': 0.18.20
+      '@esbuild/openbsd-x64': 0.18.20
+      '@esbuild/sunos-x64': 0.18.20
+      '@esbuild/win32-arm64': 0.18.20
+      '@esbuild/win32-ia32': 0.18.20
+      '@esbuild/win32-x64': 0.18.20
+    dev: true
+
+  /escalade@3.1.1:
+    resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /escape-string-regexp@4.0.0:
+    resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /eslint-scope@7.2.2:
+    resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 5.3.0
+    dev: true
+
+  /eslint-utils@3.0.0(eslint@8.28.0):
+    resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==}
+    engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0}
+    peerDependencies:
+      eslint: '>=5'
+    dependencies:
+      eslint: 8.28.0
+      eslint-visitor-keys: 2.1.0
+    dev: true
+
+  /eslint-visitor-keys@2.1.0:
+    resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /eslint-visitor-keys@3.4.3:
+    resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dev: true
+
+  /eslint@8.28.0:
+    resolution: {integrity: sha512-S27Di+EVyMxcHiwDrFzk8dJYAaD+/5SoWKxL1ri/71CRHsnJnRDPNt2Kzj24+MT9FDupf4aqqyqPrvI8MvQ4VQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    hasBin: true
+    dependencies:
+      '@eslint/eslintrc': 1.4.1
+      '@humanwhocodes/config-array': 0.11.13
+      '@humanwhocodes/module-importer': 1.0.1
+      '@nodelib/fs.walk': 1.2.8
+      ajv: 6.12.6
+      chalk: 4.1.2
+      cross-spawn: 7.0.3
+      debug: 4.3.4
+      doctrine: 3.0.0
+      escape-string-regexp: 4.0.0
+      eslint-scope: 7.2.2
+      eslint-utils: 3.0.0(eslint@8.28.0)
+      eslint-visitor-keys: 3.4.3
+      espree: 9.6.1
+      esquery: 1.5.0
+      esutils: 2.0.3
+      fast-deep-equal: 3.1.3
+      file-entry-cache: 6.0.1
+      find-up: 5.0.0
+      glob-parent: 6.0.2
+      globals: 13.23.0
+      grapheme-splitter: 1.0.4
+      ignore: 5.3.0
+      import-fresh: 3.3.0
+      imurmurhash: 0.1.4
+      is-glob: 4.0.3
+      is-path-inside: 3.0.3
+      js-sdsl: 4.4.2
+      js-yaml: 4.1.0
+      json-stable-stringify-without-jsonify: 1.0.1
+      levn: 0.4.1
+      lodash.merge: 4.6.2
+      minimatch: 3.1.2
+      natural-compare: 1.4.0
+      optionator: 0.9.3
+      regexpp: 3.2.0
+      strip-ansi: 6.0.1
+      strip-json-comments: 3.1.1
+      text-table: 0.2.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /esm-env@1.0.0:
+    resolution: {integrity: sha512-Cf6VksWPsTuW01vU9Mk/3vRue91Zevka5SjyNf3nEpokFRuqt/KjUQoGAwq9qMmhpLTHmXzSIrFRw8zxWzmFBA==}
+    dev: true
+
+  /espree@9.6.1:
+    resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      acorn: 8.11.2
+      acorn-jsx: 5.3.2(acorn@8.11.2)
+      eslint-visitor-keys: 3.4.3
+    dev: true
+
+  /esquery@1.5.0:
+    resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==}
+    engines: {node: '>=0.10'}
+    dependencies:
+      estraverse: 5.3.0
+    dev: true
+
+  /esrecurse@4.3.0:
+    resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
+    engines: {node: '>=4.0'}
+    dependencies:
+      estraverse: 5.3.0
+    dev: true
+
+  /estraverse@5.3.0:
+    resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
+    engines: {node: '>=4.0'}
+    dev: true
+
+  /esutils@2.0.3:
+    resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /fast-deep-equal@3.1.3:
+    resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
+    dev: true
+
+  /fast-glob@3.3.2:
+    resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==}
+    engines: {node: '>=8.6.0'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      '@nodelib/fs.walk': 1.2.8
+      glob-parent: 5.1.2
+      merge2: 1.4.1
+      micromatch: 4.0.5
+    dev: true
+
+  /fast-json-stable-stringify@2.1.0:
+    resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
+    dev: true
+
+  /fast-levenshtein@2.0.6:
+    resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==}
+    dev: true
+
+  /fastq@1.15.0:
+    resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==}
+    dependencies:
+      reusify: 1.0.4
+    dev: true
+
+  /file-entry-cache@6.0.1:
+    resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
+    engines: {node: ^10.12.0 || >=12.0.0}
+    dependencies:
+      flat-cache: 3.2.0
+    dev: true
+
+  /fill-range@7.0.1:
+    resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==}
+    engines: {node: '>=8'}
+    dependencies:
+      to-regex-range: 5.0.1
+    dev: true
+
+  /find-up@5.0.0:
+    resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==}
+    engines: {node: '>=10'}
+    dependencies:
+      locate-path: 6.0.0
+      path-exists: 4.0.0
+    dev: true
+
+  /flat-cache@3.2.0:
+    resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==}
+    engines: {node: ^10.12.0 || >=12.0.0}
+    dependencies:
+      flatted: 3.2.9
+      keyv: 4.5.4
+      rimraf: 3.0.2
+    dev: true
+
+  /flatted@3.2.9:
+    resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==}
+    dev: true
+
+  /fraction.js@4.3.7:
+    resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==}
+    dev: true
+
+  /fs.realpath@1.0.0:
+    resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
+    dev: true
+
+  /fsevents@2.3.3:
+    resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
+    engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /function-bind@1.1.2:
+    resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
+    dev: true
+
+  /glob-parent@5.1.2:
+    resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
+    engines: {node: '>= 6'}
+    dependencies:
+      is-glob: 4.0.3
+    dev: true
+
+  /glob-parent@6.0.2:
+    resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
+    engines: {node: '>=10.13.0'}
+    dependencies:
+      is-glob: 4.0.3
+    dev: true
+
+  /glob@7.1.6:
+    resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+    dev: true
+
+  /glob@7.2.3:
+    resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+    dev: true
+
+  /glob@8.1.0:
+    resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 5.1.6
+      once: 1.4.0
+    dev: true
+
+  /globals@13.23.0:
+    resolution: {integrity: sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==}
+    engines: {node: '>=8'}
+    dependencies:
+      type-fest: 0.20.2
+    dev: true
+
+  /globalyzer@0.1.0:
+    resolution: {integrity: sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==}
+    dev: true
+
+  /globrex@0.1.2:
+    resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==}
+    dev: true
+
+  /graceful-fs@4.2.11:
+    resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
+    dev: true
+
+  /grapheme-splitter@1.0.4:
+    resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==}
+    dev: true
+
+  /has-flag@4.0.0:
+    resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /hasown@2.0.0:
+    resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      function-bind: 1.1.2
+    dev: true
+
+  /ignore-walk@5.0.1:
+    resolution: {integrity: sha512-yemi4pMf51WKT7khInJqAvsIGzoqYXblnsz0ql8tM+yi1EKYTY1evX4NAbJrLL/Aanr2HyZeluqU+Oi7MGHokw==}
+    engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0}
+    dependencies:
+      minimatch: 5.1.6
+    dev: true
+
+  /ignore@5.3.0:
+    resolution: {integrity: sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==}
+    engines: {node: '>= 4'}
+    dev: true
+
+  /import-fresh@3.3.0:
+    resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==}
+    engines: {node: '>=6'}
+    dependencies:
+      parent-module: 1.0.1
+      resolve-from: 4.0.0
+    dev: true
+
+  /import-meta-resolve@2.2.2:
+    resolution: {integrity: sha512-f8KcQ1D80V7RnqVm+/lirO9zkOxjGxhaTC1IPrBGd3MEfNgmNG67tSUO9gTi2F3Blr2Az6g1vocaxzkVnWl9MA==}
+    dev: true
+
+  /imurmurhash@0.1.4:
+    resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
+    engines: {node: '>=0.8.19'}
+    dev: true
+
+  /inflight@1.0.6:
+    resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
+    dependencies:
+      once: 1.4.0
+      wrappy: 1.0.2
+    dev: true
+
+  /inherits@2.0.4:
+    resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
+    dev: true
+
+  /is-binary-path@2.1.0:
+    resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
+    engines: {node: '>=8'}
+    dependencies:
+      binary-extensions: 2.2.0
+    dev: true
+
+  /is-core-module@2.13.1:
+    resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==}
+    dependencies:
+      hasown: 2.0.0
+    dev: true
+
+  /is-extglob@2.1.1:
+    resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /is-glob@4.0.3:
+    resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      is-extglob: 2.1.1
+    dev: true
+
+  /is-number@7.0.0:
+    resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
+    engines: {node: '>=0.12.0'}
+    dev: true
+
+  /is-path-inside@3.0.3:
+    resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /isexe@2.0.0:
+    resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
+    dev: true
+
+  /jiti@1.21.0:
+    resolution: {integrity: sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==}
+    hasBin: true
+    dev: true
+
+  /js-sdsl@4.4.2:
+    resolution: {integrity: sha512-dwXFwByc/ajSV6m5bcKAPwe4yDDF6D614pxmIi5odytzxRlwqF6nwoiCek80Ixc7Cvma5awClxrzFtxCQvcM8w==}
+    dev: true
+
+  /js-yaml@4.1.0:
+    resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==}
+    hasBin: true
+    dependencies:
+      argparse: 2.0.1
+    dev: true
+
+  /json-buffer@3.0.1:
+    resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
+    dev: true
+
+  /json-schema-traverse@0.4.1:
+    resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
+    dev: true
+
+  /json-stable-stringify-without-jsonify@1.0.1:
+    resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
+    dev: true
+
+  /keyv@4.5.4:
+    resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==}
+    dependencies:
+      json-buffer: 3.0.1
+    dev: true
+
+  /kleur@4.1.5:
+    resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /levn@0.4.1:
+    resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+    dev: true
+
+  /lilconfig@2.1.0:
+    resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /lilconfig@3.0.0:
+    resolution: {integrity: sha512-K2U4W2Ff5ibV7j7ydLr+zLAkIg5JJ4lPn1Ltsdt+Tz/IjQ8buJ55pZAxoP34lqIiwtF9iAvtLv3JGv7CAyAg+g==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /lines-and-columns@1.2.4:
+    resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
+    dev: true
+
+  /locate-path@6.0.0:
+    resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
+    engines: {node: '>=10'}
+    dependencies:
+      p-locate: 5.0.0
+    dev: true
+
+  /lodash.merge@4.6.2:
+    resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
+    dev: true
+
+  /lower-case@2.0.2:
+    resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==}
+    dependencies:
+      tslib: 2.4.1
+    dev: true
+
+  /magic-string@0.27.0:
+    resolution: {integrity: sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==}
+    engines: {node: '>=12'}
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+    dev: true
+
+  /magic-string@0.30.5:
+    resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==}
+    engines: {node: '>=12'}
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+    dev: true
+
+  /merge2@1.4.1:
+    resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /micromatch@4.0.5:
+    resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==}
+    engines: {node: '>=8.6'}
+    dependencies:
+      braces: 3.0.2
+      picomatch: 2.3.1
+    dev: true
+
+  /min-indent@1.0.1:
+    resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /mini-svg-data-uri@1.4.4:
+    resolution: {integrity: sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==}
+    hasBin: true
+    dev: true
+
+  /minimatch@3.1.2:
+    resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
+    dependencies:
+      brace-expansion: 1.1.11
+    dev: true
+
+  /minimatch@5.1.6:
+    resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==}
+    engines: {node: '>=10'}
+    dependencies:
+      brace-expansion: 2.0.1
+    dev: true
+
+  /minimist@1.2.8:
+    resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+    dev: true
+
+  /mkdirp@0.5.6:
+    resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==}
+    hasBin: true
+    dependencies:
+      minimist: 1.2.8
+    dev: true
+
+  /mri@1.2.0:
+    resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /mrmime@1.0.1:
+    resolution: {integrity: sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /ms@2.1.2:
+    resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==}
+    dev: true
+
+  /mz@2.7.0:
+    resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
+    dependencies:
+      any-promise: 1.3.0
+      object-assign: 4.1.1
+      thenify-all: 1.6.0
+    dev: true
+
+  /nanoid@3.3.7:
+    resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==}
+    engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
+    hasBin: true
+    dev: true
+
+  /natural-compare@1.4.0:
+    resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
+    dev: true
+
+  /no-case@3.0.4:
+    resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==}
+    dependencies:
+      lower-case: 2.0.2
+      tslib: 2.4.1
+    dev: true
+
+  /node-releases@2.0.13:
+    resolution: {integrity: sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==}
+    dev: true
+
+  /normalize-path@3.0.0:
+    resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /normalize-range@0.1.2:
+    resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /npm-bundled@2.0.1:
+    resolution: {integrity: sha512-gZLxXdjEzE/+mOstGDqR6b0EkhJ+kM6fxM6vUuckuctuVPh80Q6pw/rSZj9s4Gex9GxWtIicO1pc8DB9KZWudw==}
+    engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0}
+    dependencies:
+      npm-normalize-package-bin: 2.0.0
+    dev: true
+
+  /npm-normalize-package-bin@2.0.0:
+    resolution: {integrity: sha512-awzfKUO7v0FscrSpRoogyNm0sajikhBWpU0QMrW09AMi9n1PoKU6WaIqUzuJSQnpciZZmJ/jMZ2Egfmb/9LiWQ==}
+    engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0}
+    dev: true
+
+  /npm-packlist@5.1.3:
+    resolution: {integrity: sha512-263/0NGrn32YFYi4J533qzrQ/krmmrWwhKkzwTuM4f/07ug51odoaNjUexxO4vxlzURHcmYMH1QjvHjsNDKLVg==}
+    engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0}
+    hasBin: true
+    dependencies:
+      glob: 8.1.0
+      ignore-walk: 5.0.1
+      npm-bundled: 2.0.1
+      npm-normalize-package-bin: 2.0.0
+    dev: true
+
+  /object-assign@4.1.1:
+    resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /object-hash@3.0.0:
+    resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /once@1.4.0:
+    resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+    dependencies:
+      wrappy: 1.0.2
+    dev: true
+
+  /optionator@0.9.3:
+    resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      '@aashutoshrathi/word-wrap': 1.2.6
+      deep-is: 0.1.4
+      fast-levenshtein: 2.0.6
+      levn: 0.4.1
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+    dev: true
+
+  /p-limit@3.1.0:
+    resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
+    engines: {node: '>=10'}
+    dependencies:
+      yocto-queue: 0.1.0
+    dev: true
+
+  /p-locate@5.0.0:
+    resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
+    engines: {node: '>=10'}
+    dependencies:
+      p-limit: 3.1.0
+    dev: true
+
+  /parent-module@1.0.1:
+    resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
+    engines: {node: '>=6'}
+    dependencies:
+      callsites: 3.1.0
+    dev: true
+
+  /pascal-case@3.1.2:
+    resolution: {integrity: sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==}
+    dependencies:
+      no-case: 3.0.4
+      tslib: 2.4.1
+    dev: true
+
+  /path-exists@4.0.0:
+    resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /path-is-absolute@1.0.1:
+    resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /path-key@3.1.1:
+    resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /path-parse@1.0.7:
+    resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
+    dev: true
+
+  /picocolors@1.0.0:
+    resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==}
+    dev: true
+
+  /picomatch@2.3.1:
+    resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
+    engines: {node: '>=8.6'}
+    dev: true
+
+  /pify@2.3.0:
+    resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /pirates@4.0.6:
+    resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /postcss-import@15.1.0(postcss@8.4.31):
+    resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==}
+    engines: {node: '>=14.0.0'}
+    peerDependencies:
+      postcss: ^8.0.0
+    dependencies:
+      postcss: 8.4.31
+      postcss-value-parser: 4.2.0
+      read-cache: 1.0.0
+      resolve: 1.22.8
+    dev: true
+
+  /postcss-js@4.0.1(postcss@8.4.31):
+    resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==}
+    engines: {node: ^12 || ^14 || >= 16}
+    peerDependencies:
+      postcss: ^8.4.21
+    dependencies:
+      camelcase-css: 2.0.1
+      postcss: 8.4.31
+    dev: true
+
+  /postcss-load-config@4.0.2(postcss@8.4.31):
+    resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==}
+    engines: {node: '>= 14'}
+    peerDependencies:
+      postcss: '>=8.0.9'
+      ts-node: '>=9.0.0'
+    peerDependenciesMeta:
+      postcss:
+        optional: true
+      ts-node:
+        optional: true
+    dependencies:
+      lilconfig: 3.0.0
+      postcss: 8.4.31
+      yaml: 2.3.4
+    dev: true
+
+  /postcss-nested@6.0.1(postcss@8.4.31):
+    resolution: {integrity: sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==}
+    engines: {node: '>=12.0'}
+    peerDependencies:
+      postcss: ^8.2.14
+    dependencies:
+      postcss: 8.4.31
+      postcss-selector-parser: 6.0.13
+    dev: true
+
+  /postcss-selector-parser@6.0.13:
+    resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==}
+    engines: {node: '>=4'}
+    dependencies:
+      cssesc: 3.0.0
+      util-deprecate: 1.0.2
+    dev: true
+
+  /postcss-value-parser@4.2.0:
+    resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==}
+    dev: true
+
+  /postcss@8.4.31:
+    resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
+    engines: {node: ^10 || ^12 || >=14}
+    dependencies:
+      nanoid: 3.3.7
+      picocolors: 1.0.0
+      source-map-js: 1.0.2
+    dev: true
+
+  /prelude-ls@1.2.1:
+    resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
+    engines: {node: '>= 0.8.0'}
+    dev: true
+
+  /publint@0.1.9:
+    resolution: {integrity: sha512-O53y7vbePxuGFmEjgcrafMSlDpOJwOkj8YdexOt7yWlv7SB3rXoT3mHknyMJ3lf2UFH5Bmt6tnIkHcOTR6dEoA==}
+    engines: {node: '>=16'}
+    hasBin: true
+    dependencies:
+      npm-packlist: 5.1.3
+      picocolors: 1.0.0
+      sade: 1.8.1
+    dev: true
+
+  /punycode@2.3.1:
+    resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /queue-microtask@1.2.3:
+    resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
+    dev: true
+
+  /read-cache@1.0.0:
+    resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==}
+    dependencies:
+      pify: 2.3.0
+    dev: true
+
+  /readdirp@3.6.0:
+    resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
+    engines: {node: '>=8.10.0'}
+    dependencies:
+      picomatch: 2.3.1
+    dev: true
+
+  /regexpp@3.2.0:
+    resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /resolve-from@4.0.0:
+    resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /resolve@1.22.8:
+    resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
+    hasBin: true
+    dependencies:
+      is-core-module: 2.13.1
+      path-parse: 1.0.7
+      supports-preserve-symlinks-flag: 1.0.0
+    dev: true
+
+  /reusify@1.0.4:
+    resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==}
+    engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+    dev: true
+
+  /rimraf@2.7.1:
+    resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==}
+    hasBin: true
+    dependencies:
+      glob: 7.2.3
+    dev: true
+
+  /rimraf@3.0.2:
+    resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==}
+    hasBin: true
+    dependencies:
+      glob: 7.2.3
+    dev: true
+
+  /rollup@3.29.4:
+    resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==}
+    engines: {node: '>=14.18.0', npm: '>=8.0.0'}
+    hasBin: true
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /run-parallel@1.2.0:
+    resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
+    dependencies:
+      queue-microtask: 1.2.3
+    dev: true
+
+  /sade@1.8.1:
+    resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==}
+    engines: {node: '>=6'}
+    dependencies:
+      mri: 1.2.0
+    dev: true
+
+  /sander@0.5.1:
+    resolution: {integrity: sha512-3lVqBir7WuKDHGrKRDn/1Ye3kwpXaDOMsiRP1wd6wpZW56gJhsbp5RqQpA6JG/P+pkXizygnr1dKR8vzWaVsfA==}
+    dependencies:
+      es6-promise: 3.3.1
+      graceful-fs: 4.2.11
+      mkdirp: 0.5.6
+      rimraf: 2.7.1
+    dev: true
+
+  /set-cookie-parser@2.6.0:
+    resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==}
+    dev: true
+
+  /shebang-command@2.0.0:
+    resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
+    engines: {node: '>=8'}
+    dependencies:
+      shebang-regex: 3.0.0
+    dev: true
+
+  /shebang-regex@3.0.0:
+    resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /sirv@2.0.3:
+    resolution: {integrity: sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==}
+    engines: {node: '>= 10'}
+    dependencies:
+      '@polka/url': 1.0.0-next.23
+      mrmime: 1.0.1
+      totalist: 3.0.1
+    dev: true
+
+  /sorcery@0.11.0:
+    resolution: {integrity: sha512-J69LQ22xrQB1cIFJhPfgtLuI6BpWRiWu1Y3vSsIwK/eAScqJxd/+CJlUuHQRdX2C9NGFamq+KqNywGgaThwfHw==}
+    hasBin: true
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+      buffer-crc32: 0.2.13
+      minimist: 1.2.8
+      sander: 0.5.1
+    dev: true
+
+  /source-map-js@1.0.2:
+    resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /strip-ansi@6.0.1:
+    resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
+    engines: {node: '>=8'}
+    dependencies:
+      ansi-regex: 5.0.1
+    dev: true
+
+  /strip-indent@3.0.0:
+    resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==}
+    engines: {node: '>=8'}
+    dependencies:
+      min-indent: 1.0.1
+    dev: true
+
+  /strip-json-comments@3.1.1:
+    resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /sucrase@3.34.0:
+    resolution: {integrity: sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==}
+    engines: {node: '>=8'}
+    hasBin: true
+    dependencies:
+      '@jridgewell/gen-mapping': 0.3.3
+      commander: 4.1.1
+      glob: 7.1.6
+      lines-and-columns: 1.2.4
+      mz: 2.7.0
+      pirates: 4.0.6
+      ts-interface-checker: 0.1.13
+    dev: true
+
+  /supports-color@7.2.0:
+    resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
+    engines: {node: '>=8'}
+    dependencies:
+      has-flag: 4.0.0
+    dev: true
+
+  /supports-preserve-symlinks-flag@1.0.0:
+    resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
+    engines: {node: '>= 0.4'}
+    dev: true
+
+  /svelte-check@3.6.0(postcss@8.4.31)(svelte@3.59.2):
+    resolution: {integrity: sha512-8VfqhfuRJ1sKW+o8isH2kPi0RhjXH1nNsIbCFGyoUHG+ZxVxHYRKcb+S8eaL/1tyj3VGvWYx3Y5+oCUsJgnzcw==}
+    hasBin: true
+    peerDependencies:
+      svelte: ^3.55.0 || ^4.0.0-next.0 || ^4.0.0 || ^5.0.0-next.0
+    dependencies:
+      '@jridgewell/trace-mapping': 0.3.20
+      chokidar: 3.5.3
+      fast-glob: 3.3.2
+      import-fresh: 3.3.0
+      picocolors: 1.0.0
+      sade: 1.8.1
+      svelte: 3.59.2
+      svelte-preprocess: 5.1.0(postcss@8.4.31)(svelte@3.59.2)(typescript@5.0.4)
+      typescript: 5.0.4
+    transitivePeerDependencies:
+      - '@babel/core'
+      - coffeescript
+      - less
+      - postcss
+      - postcss-load-config
+      - pug
+      - sass
+      - stylus
+      - sugarss
+    dev: true
+
+  /svelte-hmr@0.15.3(svelte@3.59.2):
+    resolution: {integrity: sha512-41snaPswvSf8TJUhlkoJBekRrABDXDMdpNpT2tfHIv4JuhgvHqLMhEPGtaQn0BmbNSTkuz2Ed20DF2eHw0SmBQ==}
+    engines: {node: ^12.20 || ^14.13.1 || >= 16}
+    peerDependencies:
+      svelte: ^3.19.0 || ^4.0.0
+    dependencies:
+      svelte: 3.59.2
+    dev: true
+
+  /svelte-preprocess@5.1.0(postcss@8.4.31)(svelte@3.59.2)(typescript@5.0.4):
+    resolution: {integrity: sha512-EkErPiDzHAc0k2MF5m6vBNmRUh338h2myhinUw/xaqsLs7/ZvsgREiLGj03VrSzbY/TB5ZXgBOsKraFee5yceA==}
+    engines: {node: '>= 14.10.0'}
+    requiresBuild: true
+    peerDependencies:
+      '@babel/core': ^7.10.2
+      coffeescript: ^2.5.1
+      less: ^3.11.3 || ^4.0.0
+      postcss: ^7 || ^8
+      postcss-load-config: ^2.1.0 || ^3.0.0 || ^4.0.0
+      pug: ^3.0.0
+      sass: ^1.26.8
+      stylus: ^0.55.0
+      sugarss: ^2.0.0 || ^3.0.0 || ^4.0.0
+      svelte: ^3.23.0 || ^4.0.0-next.0 || ^4.0.0 || ^5.0.0-next.0
+      typescript: '>=3.9.5 || ^4.0.0 || ^5.0.0'
+    peerDependenciesMeta:
+      '@babel/core':
+        optional: true
+      coffeescript:
+        optional: true
+      less:
+        optional: true
+      postcss:
+        optional: true
+      postcss-load-config:
+        optional: true
+      pug:
+        optional: true
+      sass:
+        optional: true
+      stylus:
+        optional: true
+      sugarss:
+        optional: true
+      typescript:
+        optional: true
+    dependencies:
+      '@types/pug': 2.0.9
+      detect-indent: 6.1.0
+      magic-string: 0.27.0
+      postcss: 8.4.31
+      sorcery: 0.11.0
+      strip-indent: 3.0.0
+      svelte: 3.59.2
+      typescript: 5.0.4
+    dev: true
+
+  /svelte2tsx@0.6.25(svelte@3.59.2)(typescript@5.0.4):
+    resolution: {integrity: sha512-hhBKL5X9gGvKQAZ9xLoHnbE9Yb00HxEZJlxcj2drxWK+Tpqcs/bnodjSfCGbqEhvNaUXYNbVL7s4dEXT+o0f6w==}
+    peerDependencies:
+      svelte: ^3.55 || ^4.0.0-next.0 || ^4.0 || ^5.0.0-next.0
+      typescript: ^4.9.4 || ^5.0.0
+    dependencies:
+      dedent-js: 1.0.1
+      pascal-case: 3.1.2
+      svelte: 3.59.2
+      typescript: 5.0.4
+    dev: true
+
+  /svelte@3.59.2:
+    resolution: {integrity: sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /tailwindcss@3.3.5:
+    resolution: {integrity: sha512-5SEZU4J7pxZgSkv7FP1zY8i2TIAOooNZ1e/OGtxIEv6GltpoiXUqWvLy89+a10qYTB1N5Ifkuw9lqQkN9sscvA==}
+    engines: {node: '>=14.0.0'}
+    hasBin: true
+    dependencies:
+      '@alloc/quick-lru': 5.2.0
+      arg: 5.0.2
+      chokidar: 3.5.3
+      didyoumean: 1.2.2
+      dlv: 1.1.3
+      fast-glob: 3.3.2
+      glob-parent: 6.0.2
+      is-glob: 4.0.3
+      jiti: 1.21.0
+      lilconfig: 2.1.0
+      micromatch: 4.0.5
+      normalize-path: 3.0.0
+      object-hash: 3.0.0
+      picocolors: 1.0.0
+      postcss: 8.4.31
+      postcss-import: 15.1.0(postcss@8.4.31)
+      postcss-js: 4.0.1(postcss@8.4.31)
+      postcss-load-config: 4.0.2(postcss@8.4.31)
+      postcss-nested: 6.0.1(postcss@8.4.31)
+      postcss-selector-parser: 6.0.13
+      resolve: 1.22.8
+      sucrase: 3.34.0
+    transitivePeerDependencies:
+      - ts-node
+    dev: true
+
+  /text-table@0.2.0:
+    resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
+    dev: true
+
+  /thenify-all@1.6.0:
+    resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
+    engines: {node: '>=0.8'}
+    dependencies:
+      thenify: 3.3.1
+    dev: true
+
+  /thenify@3.3.1:
+    resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
+    dependencies:
+      any-promise: 1.3.0
+    dev: true
+
+  /tiny-glob@0.2.9:
+    resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==}
+    dependencies:
+      globalyzer: 0.1.0
+      globrex: 0.1.2
+    dev: true
+
+  /to-regex-range@5.0.1:
+    resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
+    engines: {node: '>=8.0'}
+    dependencies:
+      is-number: 7.0.0
+    dev: true
+
+  /totalist@3.0.1:
+    resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /ts-interface-checker@0.1.13:
+    resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==}
+    dev: true
+
+  /tslib@2.4.1:
+    resolution: {integrity: sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==}
+    dev: true
+
+  /type-check@0.4.0:
+    resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      prelude-ls: 1.2.1
+    dev: true
+
+  /type-fest@0.20.2:
+    resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /typescript@5.0.4:
+    resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==}
+    engines: {node: '>=12.20'}
+    hasBin: true
+    dev: true
+
+  /undici@5.26.5:
+    resolution: {integrity: sha512-cSb4bPFd5qgR7qr2jYAi0hlX9n5YKK2ONKkLFkxl+v/9BvC0sOpZjBHDBSXc5lWAf5ty9oZdRXytBIHzgUcerw==}
+    engines: {node: '>=14.0'}
+    dependencies:
+      '@fastify/busboy': 2.1.0
+    dev: true
+
+  /update-browserslist-db@1.0.13(browserslist@4.22.1):
+    resolution: {integrity: sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==}
+    hasBin: true
+    peerDependencies:
+      browserslist: '>= 4.21.0'
+    dependencies:
+      browserslist: 4.22.1
+      escalade: 3.1.1
+      picocolors: 1.0.0
+    dev: true
+
+  /uri-js@4.4.1:
+    resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
+    dependencies:
+      punycode: 2.3.1
+    dev: true
+
+  /util-deprecate@1.0.2:
+    resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+    dev: true
+
+  /vite@4.5.0:
+    resolution: {integrity: sha512-ulr8rNLA6rkyFAlVWw2q5YJ91v098AFQ2R0PRFwPzREXOUJQPtFUG0t+/ZikhaOCDqFoDhN6/v8Sq0o4araFAw==}
+    engines: {node: ^14.18.0 || >=16.0.0}
+    hasBin: true
+    peerDependencies:
+      '@types/node': '>= 14'
+      less: '*'
+      lightningcss: ^1.21.0
+      sass: '*'
+      stylus: '*'
+      sugarss: '*'
+      terser: ^5.4.0
+    peerDependenciesMeta:
+      '@types/node':
+        optional: true
+      less:
+        optional: true
+      lightningcss:
+        optional: true
+      sass:
+        optional: true
+      stylus:
+        optional: true
+      sugarss:
+        optional: true
+      terser:
+        optional: true
+    dependencies:
+      esbuild: 0.18.20
+      postcss: 8.4.31
+      rollup: 3.29.4
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /vitefu@0.2.5(vite@4.5.0):
+    resolution: {integrity: sha512-SgHtMLoqaeeGnd2evZ849ZbACbnwQCIwRH57t18FxcXoZop0uQu0uzlIhJBlF/eWVzuce0sHeqPcDo+evVcg8Q==}
+    peerDependencies:
+      vite: ^3.0.0 || ^4.0.0 || ^5.0.0
+    peerDependenciesMeta:
+      vite:
+        optional: true
+    dependencies:
+      vite: 4.5.0
+    dev: true
+
+  /which@2.0.2:
+    resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
+    engines: {node: '>= 8'}
+    hasBin: true
+    dependencies:
+      isexe: 2.0.0
+    dev: true
+
+  /wrappy@1.0.2:
+    resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
+    dev: true
+
+  /yaml@2.3.4:
+    resolution: {integrity: sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==}
+    engines: {node: '>= 14'}
+    dev: true
+
+  /yocto-queue@0.1.0:
+    resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
+    engines: {node: '>=10'}
+    dev: true
diff --git a/packages/widgets/postcss.config.js b/packages/widgets/postcss.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..7b75c83aff1c05e0e0e315638e07a22314603d4d
--- /dev/null
+++ b/packages/widgets/postcss.config.js
@@ -0,0 +1,7 @@
+export default {
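+	// Plugins run in order: Tailwind generates the utility CSS first, then Autoprefixer adds vendor prefixes.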
+	plugins: {
+		tailwindcss: {},
+		autoprefixer: {},
+	},
+};
diff --git a/packages/widgets/src/app.d.ts b/packages/widgets/src/app.d.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f59b884c51ed3c31fc0738fd38d0d75b580df5e4
--- /dev/null
+++ b/packages/widgets/src/app.d.ts
@@ -0,0 +1,12 @@
+// See https://kit.svelte.dev/docs/types#app
+// for information about these interfaces
+declare global {
+	namespace App {
+		// interface Error {}
+		// interface Locals {}
+		// interface PageData {}
+		// interface Platform {}
+	}
+}
+
+export {};
diff --git a/packages/widgets/src/app.html b/packages/widgets/src/app.html
new file mode 100644
index 0000000000000000000000000000000000000000..f22aeaad5e392f5121f38b7d9e6ba033438b3103
--- /dev/null
+++ b/packages/widgets/src/app.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html lang="en">
+	<head>
+		<meta charset="utf-8" />
+		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
+		<meta name="viewport" content="width=device-width, initial-scale=1" />
+		%sveltekit.head%
+	</head>
+	<body data-sveltekit-preload-data="hover">
+		<div>%sveltekit.body%</div>
+	</body>
+</html>
diff --git a/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte b/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..244e51cc6740cfef9d2ffea8b524c58a4abaa5fd
--- /dev/null
+++ b/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte
@@ -0,0 +1,64 @@
+<script lang="ts">
+	import { onMount } from "svelte";
+	let darkMode = false;
+	const THEME_KEY = "themePreference";
+
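+	// Apply the choice by toggling the "dark" class on <html>, which presumably drives Tailwind's class-based dark mode.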
+	function setDarkTheme(dark: boolean) {
+		darkMode = dark;
+		document.documentElement.classList.toggle("dark", darkMode);
+	}
+
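+	// Flip the theme and persist the explicit user choice.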
+	function toggleMode() {
+		setDarkTheme(!darkMode);
+		window.localStorage.setItem(THEME_KEY, darkMode ? "dark" : "light");
+	}
+
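+	// On mount, restore a saved preference; with none saved, fall back to the OS color-scheme setting.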
+	onMount(() => {
+		const theme = window.localStorage.getItem(THEME_KEY);
+		if (theme === "dark") {
+			setDarkTheme(true);
+		} else if (theme === null && window.matchMedia("(prefers-color-scheme: dark)").matches) {
+			setDarkTheme(true);
+		}
+	});
+</script>
+
+<button type="button" aria-label="Toggle dark mode" class="absolute top-0 right-0 h-8 w-8 cursor-pointer p-2" on:click={toggleMode}>
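+	<!-- Moon icon while dark mode is active, sun icon otherwise. -->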
+	{#if darkMode}
+		<svg
+			aria-hidden="true"
+			focusable="false"
+			data-prefix="far"
+			data-icon="moon"
+			class="svg-inline--fa fa-moon fa-w-16 text-gray-100"
+			role="img"
+			xmlns="http://www.w3.org/2000/svg"
+			viewBox="0 0 512 512"
+		>
+			<path
+				fill="currentColor"
+				d="M279.135 512c78.756 0 150.982-35.804 198.844-94.775 28.27-34.831-2.558-85.722-46.249-77.401-82.348 15.683-158.272-47.268-158.272-130.792 0-48.424 26.06-92.292 67.434-115.836 38.745-22.05 28.999-80.788-15.022-88.919A257.936 257.936 0 0 0 279.135 0c-141.36 0-256 114.575-256 256 0 141.36 114.576 256 256 256zm0-464c12.985 0 25.689 1.201 38.016 3.478-54.76 31.163-91.693 90.042-91.693 157.554 0 113.848 103.641 199.2 215.252 177.944C402.574 433.964 344.366 464 279.135 464c-114.875 0-208-93.125-208-208s93.125-208 208-208z"
+			/>
+		</svg>
+	{:else}
+		<svg
+			aria-hidden="true"
+			focusable="false"
+			data-prefix="far"
+			data-icon="sun"
+			class="svg-inline--fa fa-sun fa-w-16"
+			role="img"
+			xmlns="http://www.w3.org/2000/svg"
+			viewBox="0 0 512 512"
+		>
+			<path
+				fill="currentColor"
+				d="M494.2 221.9l-59.8-40.5 13.7-71c2.6-13.2-1.6-26.8-11.1-36.4-9.6-9.5-23.2-13.7-36.2-11.1l-70.9 13.7-40.4-59.9c-15.1-22.3-51.9-22.3-67 0l-40.4 59.9-70.8-13.7C98 60.4 84.5 64.5 75 74.1c-9.5 9.6-13.7 23.1-11.1 36.3l13.7 71-59.8 40.5C6.6 229.5 0 242 0 255.5s6.7 26 17.8 33.5l59.8 40.5-13.7 71c-2.6 13.2 1.6 26.8 11.1 36.3 9.5 9.5 22.9 13.7 36.3 11.1l70.8-13.7 40.4 59.9C230 505.3 242.6 512 256 512s26-6.7 33.5-17.8l40.4-59.9 70.9 13.7c13.4 2.7 26.8-1.6 36.3-11.1 9.5-9.5 13.6-23.1 11.1-36.3l-13.7-71 59.8-40.5c11.1-7.5 17.8-20.1 17.8-33.5-.1-13.6-6.7-26.1-17.9-33.7zm-112.9 85.6l17.6 91.2-91-17.6L256 458l-51.9-77-90.9 17.6 17.6-91.2-76.8-52 76.8-52-17.6-91.2 91 17.6L256 53l51.9 76.9 91-17.6-17.6 91.1 76.8 52-76.8 52.1zM256 152c-57.3 0-104 46.7-104 104s46.7 104 104 104 104-46.7 104-104-46.7-104-104-104zm0 160c-30.9 0-56-25.1-56-56s25.1-56 56-56 56 25.1 56 56-25.1 56-56 56z"
+			/>
+		</svg>
+	{/if}
+</button>
diff --git a/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte b/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..963570923853f7e72c2c4c31e24c8e8f45dd32f3
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M25 4H10a2.002 2.002 0 0 0-2 2v14.556A3.955 3.955 0 0 0 6 20a4 4 0 1 0 4 4V12h15v8.556A3.954 3.954 0 0 0 23 20a4 4 0 1 0 4 4V6a2.002 2.002 0 0 0-2-2zM6 26a2 2 0 1 1 2-2a2.002 2.002 0 0 1-2 2zm17 0a2 2 0 1 1 2-2a2.003 2.003 0 0 1-2 2zM10 6h15v4H10z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte b/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ae5c4aeef4cbaded1451e7672855dca33c8a5550
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M4.5 11.12H5C5.26513 11.1203 5.51933 11.2257 5.70681 11.4132C5.89429 11.6007 5.99973 11.8549 6 12.12V13.12C5.99973 13.3851 5.89429 13.6393 5.70681 13.8268C5.51933 14.0143 5.26513 14.1197 5 14.12H4.5V16.12H3.5V14.12H3C2.73486 14.1197 2.48066 14.0143 2.29319 13.8268C2.10571 13.6393 2.00026 13.3851 2 13.12V12.12C2.00026 11.8549 2.10571 11.6007 2.29319 11.4132C2.48066 11.2257 2.73486 11.1203 3 11.12H3.5V2.12H4.5V11.12ZM3 13.12H5V12.12H3V13.12ZM10 4.12H9.5V2.12H8.5V4.12H8C7.73487 4.12027 7.48067 4.22571 7.29319 4.41319C7.10571 4.60067 7.00027 4.85487 7 5.12V6.12C7.00027 6.38514 7.10571 6.63934 7.29319 6.82682C7.48067 7.01429 7.73487 7.11974 8 7.12H8.5V16.12H9.5V7.12H10C10.2651 7.11974 10.5193 7.01429 10.7068 6.82682C10.8943 6.63934 10.9997 6.38514 11 6.12V5.12C10.9997 4.85487 10.8943 4.60067 10.7068 4.41319C10.5193 4.22571 10.2651 4.12027 10 4.12ZM10 6.12H8V5.12H10V6.12ZM15 8.12H14.5V2.12H13.5V8.12H13C12.7349 8.12027 12.4807 8.22571 12.2932 8.41319C12.1057 8.60067 12.0003 8.85486 12 9.12V10.12C12.0003 10.3851 12.1057 10.6393 12.2932 10.8268C12.4807 11.0143 12.7349 11.1197 13 11.12H13.5V16.12H14.5V11.12H15C15.2651 11.1196 15.5192 11.0141 15.7067 10.8267C15.8941 10.6392 15.9996 10.3851 16 10.12V9.12C15.9997 8.85486 15.8943 8.60067 15.7068 8.41319C15.5193 8.22571 15.2651 8.12027 15 8.12ZM15 10.12H13V9.12H15V10.12Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte b/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..01b18c541c81c064f42cbe3b5fd74051b1b1563f
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M8.38893 3.42133C7.9778 3.14662 7.49446 3 7 3C6.33696 3 5.70108 3.26339 5.23223 3.73223C4.76339 4.20107 4.5 4.83696 4.5 5.5C4.5 5.99445 4.64662 6.4778 4.92133 6.88893C5.19603 7.30005 5.58648 7.62048 6.04329 7.8097C6.50011 7.99892 7.00278 8.04843 7.48773 7.95196C7.97268 7.8555 8.41814 7.6174 8.76777 7.26777C9.1174 6.91814 9.3555 6.47268 9.45197 5.98773C9.54843 5.50277 9.49892 5.00011 9.3097 4.54329C9.12048 4.08648 8.80005 3.69603 8.38893 3.42133ZM5.05551 2.58986C5.63108 2.20527 6.30777 2 7 2C7.92826 2 8.8185 2.36875 9.47488 3.02513C10.1313 3.6815 10.5 4.57174 10.5 5.5C10.5 6.19223 10.2947 6.86892 9.91015 7.4445C9.52556 8.02007 8.97894 8.46867 8.33939 8.73358C7.69985 8.99849 6.99612 9.0678 6.31719 8.93275C5.63825 8.7977 5.01461 8.46436 4.52513 7.97487C4.03564 7.48539 3.7023 6.86175 3.56725 6.18282C3.4322 5.50388 3.50152 4.80015 3.76642 4.16061C4.03133 3.52107 4.47993 2.97444 5.05551 2.58986ZM14.85 9.6425L15.7075 10.5C15.8005 10.5927 15.8743 10.7029 15.9245 10.8242C15.9747 10.9456 16.0004 11.0757 16 11.207V16H2V13.5C2.00106 12.5721 2.37015 11.6824 3.0263 11.0263C3.68244 10.3701 4.57207 10.0011 5.5 10H8.5C9.42793 10.0011 10.3176 10.3701 10.9737 11.0263C11.6299 11.6824 11.9989 12.5721 12 13.5V15H15V11.207L14.143 10.35C13.9426 10.4476 13.7229 10.4989 13.5 10.5C13.2033 10.5 12.9133 10.412 12.6666 10.2472C12.42 10.0824 12.2277 9.84811 12.1142 9.57403C12.0006 9.29994 11.9709 8.99834 12.0288 8.70737C12.0867 8.41639 12.2296 8.14912 12.4393 7.93934C12.6491 7.72956 12.9164 7.5867 13.2074 7.52882C13.4983 7.47094 13.7999 7.50065 14.074 7.61418C14.3481 7.72771 14.5824 7.91997 14.7472 8.16665C14.912 8.41332 15 8.70333 15 9C14.9988 9.22271 14.9475 9.44229 14.85 9.6425ZM3.73311 11.7331C3.26444 12.2018 3.00079 12.8372 3 13.5V15H11V13.5C10.9992 12.8372 10.7356 12.2018 10.2669 11.7331C9.79822 11.2644 9.1628 11.0008 8.5 11H5.5C4.8372 11.0008 4.20178 11.2644 3.73311 11.7331Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconAzureML.svelte b/packages/widgets/src/lib/components/Icons/IconAzureML.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3fc373b4ba05f593431fdad7ef35068aebca2aa1
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconAzureML.svelte
@@ -0,0 +1,40 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<defs>
+		<linearGradient
+			id="machine-learning-service_e1bcfd0f-68b5-4f66-9501-e6a7245a18e7"
+			x1="1.1"
+			y1="169"
+			x2="11.12"
+			y2="169"
+			gradientTransform="translate(0 -160)"
+			gradientUnits="userSpaceOnUse"
+		>
+			<stop offset="0" stop-color="#50c7e8" />
+			<stop offset="0.25" stop-color="#4cc3e4" />
+			<stop offset="0.51" stop-color="#41b6da" />
+			<stop offset="0.77" stop-color="#2fa2c8" />
+			<stop offset="1" stop-color="#1989b2" />
+		</linearGradient>
+	</defs>
+	<path d="M15.8,17.5H2.2L1.1,13.4H16.9Z" fill="#198ab3" />
+	<polygon
+		points="6.9 0.5 6.9 6.9 1.1 13.4 2.2 17.5 11.1 6.9 11.1 0.5 6.9 0.5"
+		fill="url(#machine-learning-service_e1bcfd0f-68b5-4f66-9501-e6a7245a18e7)"
+	/>
+	<path d="M15.8,17.5,9.6,11.1l2.6-3,4.7,5.3Z" fill="#32bedd" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte b/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ae3574382b017c17f79271e6c9de9d12ee725a6f
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte
@@ -0,0 +1,19 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 24 24"
+	style="transform: rotate(360deg);"
+>
+	<path d="M7 10l5 5l5-5z" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte b/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..f8b4baac5a1e6de91b0c3149a89111cee87369fa
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte
@@ -0,0 +1,11 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg class={classNames} xmlns="http://www.w3.org/2000/svg" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true">
+	<path
+		fill-rule="evenodd"
+		d="M5.293 7.293a1 1 0 011.414 0L10 10.586l3.293-3.293a1 1 0 111.414 1.414l-4 4a1 1 0 01-1.414 0l-4-4a1 1 0 010-1.414z"
+		clip-rule="evenodd"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconCode.svelte b/packages/widgets/src/lib/components/Icons/IconCode.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..a84e7b970a940cd4bec28b1900dae603e7248a11
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconCode.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+	style="transform: rotate(360deg);"
+>
+	<path d="M31 16l-7 7l-1.41-1.41L28.17 16l-5.58-5.59L24 9l7 7z" fill="currentColor" />
+	<path d="M1 16l7-7l1.41 1.41L3.83 16l5.58 5.59L8 23l-7-7z" fill="currentColor" />
+	<path d="M12.419 25.484L17.639 6l1.932.518L14.35 26z" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconConversational.svelte b/packages/widgets/src/lib/components/Icons/IconConversational.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..38e01c10bd5be501bc16904e9c7cbd1762fe04fe
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconConversational.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		d="M10.0461 16.7125L9.1 16.1687L11.275 12.3625H14.5375C14.8259 12.3625 15.1025 12.2479 15.3065 12.044C15.5104 11.84 15.625 11.5634 15.625 11.275V4.74998C15.625 4.46156 15.5104 4.18495 15.3065 3.981C15.1025 3.77706 14.8259 3.66248 14.5375 3.66248H3.6625C3.37407 3.66248 3.09746 3.77706 2.89352 3.981C2.68957 4.18495 2.575 4.46156 2.575 4.74998V11.275C2.575 11.5634 2.68957 11.84 2.89352 12.044C3.09746 12.2479 3.37407 12.3625 3.6625 12.3625H8.55625V13.45H3.6625C3.08565 13.45 2.53243 13.2208 2.12454 12.8129C1.71665 12.405 1.4875 11.8518 1.4875 11.275V4.74998C1.4875 4.17314 1.71665 3.61992 2.12454 3.21202C2.53243 2.80413 3.08565 2.57498 3.6625 2.57498H14.5375C15.1143 2.57498 15.6676 2.80413 16.0755 3.21202C16.4833 3.61992 16.7125 4.17314 16.7125 4.74998V11.275C16.7125 11.8518 16.4833 12.405 16.0755 12.8129C15.6676 13.2208 15.1143 13.45 14.5375 13.45H11.9057L10.0461 16.7125Z"
+	/>
+	<path d="M4.75 5.83746H13.45V6.92496H4.75V5.83746Z" />
+	<path d="M4.75 9.10004H10.1875V10.1875H4.75V9.10004Z" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconCross.svelte b/packages/widgets/src/lib/components/Icons/IconCross.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ae6b04ffca6828d1483f4aea8522641f2c82320c
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconCross.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1.1em"
+	height="1.1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M24 9.4L22.6 8L16 14.6L9.4 8L8 9.4l6.6 6.6L8 22.6L9.4 24l6.6-6.6l6.6 6.6l1.4-1.4l-6.6-6.6L24 9.4z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte b/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..83362801b18c72cd5972afa26719047eec12a597
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte
@@ -0,0 +1,10 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg class={classNames} width="1em" height="1em" viewBox="0 0 32 32"
+	><path
+		d="M29.474 19.12L23.681 16l5.793-3.12a1 1 0 0 0 0-1.76l-13-7a.998.998 0 0 0-.948 0l-13 7a1 1 0 0 0 0 1.76L8.319 16l-5.793 3.12a1 1 0 0 0 0 1.76l13 7a1 1 0 0 0 .948 0l13-7a1 1 0 0 0 0-1.76zM16 6.136L26.89 12L16 17.864L5.11 12zm0 19.728L5.11 20l5.319-2.864l5.097 2.744a1 1 0 0 0 .948 0l5.097-2.745L26.891 20z"
+		fill="currentColor"
+	/></svg
+>
diff --git a/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6c1c842972c809eda5952190e55d59e438db115b
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte
@@ -0,0 +1,13 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg class={classNames} width="1em" height="1em" viewBox="0 0 32 32"
+	><path
+		fill="currentColor"
+		d="M24 30a6 6 0 1 1 6-6a6.007 6.007 0 0 1-6 6zm0-10a4 4 0 1 0 4 4a4.005 4.005 0 0 0-4-4zM8 22h8v2H8zm0-6h10v2H8z"
+	/><path
+		fill="currentColor"
+		d="M16 28H6V4h8v6a2.006 2.006 0 0 0 2 2h6v3h2v-5a.91.91 0 0 0-.3-.7l-7-7A.909.909 0 0 0 16 2H6a2.006 2.006 0 0 0-2 2v24a2.006 2.006 0 0 0 2 2h10Zm0-23.6l5.6 5.6H16Z"
+	/></svg
+>
diff --git a/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte b/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..c1806a76a7f0e0bdd1016652316370c524ed5f70
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M27 3H5a2 2 0 0 0-2 2v22a2 2 0 0 0 2 2h22a2 2 0 0 0 2-2V5a2 2 0 0 0-2-2zm0 2v4H5V5zm-10 6h10v7H17zm-2 7H5v-7h10zM5 20h10v7H5zm12 7v-7h10v7z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconFile.svelte b/packages/widgets/src/lib/components/Icons/IconFile.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..0d50da5d5391eaf66918d7b0b9d35c418e5daa01
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconFile.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M25.7 9.3l-7-7A.908.908 0 0 0 18 2H8a2.006 2.006 0 0 0-2 2v24a2.006 2.006 0 0 0 2 2h16a2.006 2.006 0 0 0 2-2V10a.908.908 0 0 0-.3-.7zM18 4.4l5.6 5.6H18zM24 28H8V4h8v6a2.006 2.006 0 0 0 2 2h6z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconFillMask.svelte b/packages/widgets/src/lib/components/Icons/IconFillMask.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..e096d5a8cf27e9e8af6ac4e99c2a95ac31360386
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconFillMask.svelte
@@ -0,0 +1,27 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 19"
+>
+	<path
+		d="M12.3625 13.85H10.1875V12.7625H12.3625V10.5875H13.45V12.7625C13.4497 13.0508 13.335 13.3272 13.1312 13.5311C12.9273 13.735 12.6508 13.8497 12.3625 13.85V13.85Z"
+	/>
+	<path
+		d="M5.8375 8.41246H4.75V6.23746C4.75029 5.94913 4.86496 5.67269 5.06884 5.4688C5.27272 5.26492 5.54917 5.15025 5.8375 5.14996H8.0125V6.23746H5.8375V8.41246Z"
+	/>
+	<path
+		d="M15.625 5.14998H13.45V2.97498C13.4497 2.68665 13.335 2.4102 13.1312 2.20632C12.9273 2.00244 12.6508 1.88777 12.3625 1.88748H2.575C2.28666 1.88777 2.01022 2.00244 1.80633 2.20632C1.60245 2.4102 1.48778 2.68665 1.4875 2.97498V12.7625C1.48778 13.0508 1.60245 13.3273 1.80633 13.5311C2.01022 13.735 2.28666 13.8497 2.575 13.85H4.75V16.025C4.75028 16.3133 4.86495 16.5898 5.06883 16.7936C5.27272 16.9975 5.54916 17.1122 5.8375 17.1125H15.625C15.9133 17.1122 16.1898 16.9975 16.3937 16.7936C16.5975 16.5898 16.7122 16.3133 16.7125 16.025V6.23748C16.7122 5.94915 16.5975 5.6727 16.3937 5.46882C16.1898 5.26494 15.9133 5.15027 15.625 5.14998V5.14998ZM15.625 16.025H5.8375V13.85H8.0125V12.7625H5.8375V10.5875H4.75V12.7625H2.575V2.97498H12.3625V5.14998H10.1875V6.23748H12.3625V8.41248H13.45V6.23748H15.625V16.025Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconGraphML.svelte b/packages/widgets/src/lib/components/Icons/IconGraphML.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6000b5ab3dbb44511396fd5549cd0662a4319c71
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconGraphML.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 12 12"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M8.72503 2.51055C8.07586 2.51055 7.54961 3.0368 7.54961 3.68597C7.54961 4.33514 8.07586 4.86139 8.72503 4.86139C9.3742 4.86139 9.90046 4.33514 9.90046 3.68597C9.90046 3.0368 9.3742 2.51055 8.72503 2.51055ZM6.84993 3.65093C6.86861 2.63133 7.70097 1.81055 8.72503 1.81055C9.7608 1.81055 10.6005 2.6502 10.6005 3.68597C10.6005 4.72174 9.7608 5.56139 8.72503 5.56139C7.91971 5.56139 7.23295 5.05381 6.96722 4.34111L5.32028 4.64561C5.32087 4.66485 5.32116 4.68417 5.32116 4.70355C5.32116 5.03663 5.23433 5.34944 5.08206 5.62056L6.37635 6.76258C6.69384 6.51597 7.09268 6.36914 7.52581 6.36914C8.56158 6.36914 9.40124 7.2088 9.40124 8.24456C9.40124 9.28033 8.56158 10.12 7.52581 10.12C6.49005 10.12 5.65039 9.28033 5.65039 8.24456C5.65039 7.8948 5.74614 7.5674 5.91285 7.28714L4.63154 6.15658C4.30841 6.4206 3.89557 6.57897 3.44574 6.57897C2.40997 6.57897 1.57031 5.73932 1.57031 4.70355C1.57031 3.66778 2.40997 2.82812 3.44574 2.82812C4.21802 2.82812 4.88128 3.29493 5.16875 3.96177L6.84993 3.65093ZM3.44574 3.52812C2.79657 3.52812 2.27031 4.05438 2.27031 4.70355C2.27031 5.35272 2.79657 5.87897 3.44574 5.87897C4.0949 5.87897 4.62116 5.35272 4.62116 4.70355C4.62116 4.05438 4.0949 3.52812 3.44574 3.52812ZM7.52581 7.06914C6.87665 7.06914 6.35039 7.5954 6.35039 8.24456C6.35039 8.89373 6.87665 9.41999 7.52581 9.41999C8.17498 9.41999 8.70124 8.89373 8.70124 8.24456C8.70124 7.5954 8.17498 7.06914 7.52581 7.06914Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte b/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..fa83eb2057bb9faab35d59b9936e382df316cfa0
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<polygon points="4 20 4 22 8.586 22 2 28.586 3.414 30 10 23.414 10 28 12 28 12 20 4 20" />
+	<path d="M19,14a3,3,0,1,0-3-3A3,3,0,0,0,19,14Zm0-4a1,1,0,1,1-1,1A1,1,0,0,1,19,10Z" />
+	<path
+		d="M26,4H6A2,2,0,0,0,4,6V16H6V6H26V21.17l-3.59-3.59a2,2,0,0,0-2.82,0L18,19.17,11.8308,13l-1.4151,1.4155L14,18l2.59,2.59a2,2,0,0,0,2.82,0L21,19l5,5v2H16v2H26a2,2,0,0,0,2-2V6A2,2,0,0,0,26,4Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte b/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d84e55c747ed85fd76388e90ebd81e783ec9cf21
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte
@@ -0,0 +1,24 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M30,3.4141,28.5859,2,2,28.5859,3.4141,30l2-2H26a2.0027,2.0027,0,0,0,2-2V5.4141ZM26,26H7.4141l7.7929-7.793,2.3788,2.3787a2,2,0,0,0,2.8284,0L22,19l4,3.9973Zm0-5.8318-2.5858-2.5859a2,2,0,0,0-2.8284,0L19,19.1682l-2.377-2.3771L26,7.4141Z"
+	/>
+	<path
+		d="M6,22V19l5-4.9966,1.3733,1.3733,1.4159-1.416-1.375-1.375a2,2,0,0,0-2.8284,0L6,16.1716V6H22V4H6A2.002,2.002,0,0,0,4,6V22Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte b/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..1ea332d30392075d9e224d76683b37b99cdf1927
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		fill="currentColor"
+		d="M4 22H2V4a2.002 2.002 0 0 1 2-2h18v2H4zm17-5a3 3 0 1 0-3-3a3.003 3.003 0 0 0 3 3zm0-4a1 1 0 1 1-1 1a1 1 0 0 1 1-1z"
+	/><path
+		fill="currentColor"
+		d="M28 7H9a2.002 2.002 0 0 0-2 2v19a2.002 2.002 0 0 0 2 2h19a2.002 2.002 0 0 0 2-2V9a2.002 2.002 0 0 0-2-2Zm0 21H9v-6l4-3.997l5.586 5.586a2 2 0 0 0 2.828 0L23 22.003L28 27Zm0-3.828l-3.586-3.586a2 2 0 0 0-2.828 0L20 22.172l-5.586-5.586a2 2 0 0 0-2.828 0L9 19.172V9h19Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconImageToText.svelte b/packages/widgets/src/lib/components/Icons/IconImageToText.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..8263c0801f56beeef9c64c7fbb96bf66eda61227
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconImageToText.svelte
@@ -0,0 +1,28 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M29.707 19.293l-3-3a1 1 0 0 0-1.414 0L16 25.586V30h4.414l9.293-9.293a1 1 0 0 0 0-1.414zM19.586 28H18v-1.586l5-5L24.586 23zM26 21.586L24.414 20L26 18.414L27.586 20z"
+		fill="currentColor"
+	/>
+	<path
+		d="M20 13v-2h-2.142a3.94 3.94 0 0 0-.425-1.019l1.517-1.517l-1.414-1.414l-1.517 1.517A3.944 3.944 0 0 0 15 8.142V6h-2v2.142a3.944 3.944 0 0 0-1.019.425L10.464 7.05L9.05 8.464l1.517 1.517A3.94 3.94 0 0 0 10.142 11H8v2h2.142a3.94 3.94 0 0 0 .425 1.019L9.05 15.536l1.414 1.414l1.517-1.517a3.944 3.944 0 0 0 1.019.425V18h2v-2.142a3.944 3.944 0 0 0 1.019-.425l1.517 1.517l1.414-1.414l-1.517-1.517A3.94 3.94 0 0 0 17.858 13zm-6 1a2 2 0 1 1 2-2a2.002 2.002 0 0 1-2 2z"
+		fill="currentColor"
+	/>
+	<path
+		d="M12 30H6a2.002 2.002 0 0 1-2-2V4a2.002 2.002 0 0 1 2-2h16a2.002 2.002 0 0 1 2 2v10h-2V4H6v24h6z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconInfo.svelte b/packages/widgets/src/lib/components/Icons/IconInfo.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6b5a9b1f993bb62e649dfe42aa3c47c5ef200998
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconInfo.svelte
@@ -0,0 +1,20 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path d="M17 22v-8h-4v2h2v6h-3v2h8v-2h-3z" fill="currentColor" />
+	<path d="M16 8a1.5 1.5 0 1 0 1.5 1.5A1.5 1.5 0 0 0 16 8z" fill="currentColor" />
+	<path d="M16 30a14 14 0 1 1 14-14a14 14 0 0 1-14 14zm0-26a12 12 0 1 0 12 12A12 12 0 0 0 16 4z" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconLightning.svelte b/packages/widgets/src/lib/components/Icons/IconLightning.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..91d87c2ccbac546e8e21f2e022866a64feb9cbaf
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconLightning.svelte
@@ -0,0 +1,18 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	class={classNames}
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 24 24"
+>
+	<path d="M11 15H6l7-14v8h5l-7 14v-8z" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte b/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7aa63b2a2947ad0fca833ae3cd7f7ab953c650ce
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+	><path
+		d="M29.414 24L12 6.586a2.048 2.048 0 0 0-2.828 0L6.586 9.172a2.002 2.002 0 0 0 0 2.828l17.413 17.414a2.002 2.002 0 0 0 2.828 0l2.587-2.586a2 2 0 0 0 0-2.828zM8 10.586L10.586 8l5 5l-2.587 2.587l-5-5zM25.413 28l-11-10.999L17 14.414l11 11z"
+		fill="currentColor"
+	/><path d="M2 16l2-2l2 2l-2 2z" fill="currentColor" /><path d="M14 4l2-2l2 2l-2 2z" fill="currentColor" /><path
+		d="M2 4l2-2l2 2l-2 2z"
+		fill="currentColor"
+	/></svg
+>
diff --git a/packages/widgets/src/lib/components/Icons/IconMaximize.svelte b/packages/widgets/src/lib/components/Icons/IconMaximize.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..872037067ba188e8c90c275f09d368228e3faf7f
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconMaximize.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path d="M22 16h2V8h-8v2h6v6z" fill="currentColor" />
+	<path d="M8 24h8v-2h-6v-6H8v8z" fill="currentColor" />
+	<path
+		d="M26 28H6a2.002 2.002 0 0 1-2-2V6a2.002 2.002 0 0 1 2-2h20a2.002 2.002 0 0 1 2 2v20a2.002 2.002 0 0 1-2 2zM6 6v20h20.001L26 6z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte b/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d4638549352378b7cd2a8212d3c053e885d7ba72
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M23 14v3a7 7 0 0 1-14 0v-3H7v3a9 9 0 0 0 8 8.94V28h-4v2h10v-2h-4v-2.06A9 9 0 0 0 25 17v-3z"
+		fill="currentColor"
+	/>
+	<path
+		d="M16 22a5 5 0 0 0 5-5V7a5 5 0 0 0-10 0v10a5 5 0 0 0 5 5zM13 7a3 3 0 0 1 6 0v10a3 3 0 0 1-6 0z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte b/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ae1e1a7a68974667d77a1899a04a7e787e2cc48d
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M24,14a5.99,5.99,0,0,0-4.885,9.4712L14,28.5859,15.4141,30l5.1147-5.1147A5.9971,5.9971,0,1,0,24,14Zm0,10a4,4,0,1,1,4-4A4.0045,4.0045,0,0,1,24,24Z"
+	/>
+	<path d="M17,12a3,3,0,1,0-3-3A3.0033,3.0033,0,0,0,17,12Zm0-4a1,1,0,1,1-1,1A1.0009,1.0009,0,0,1,17,8Z" />
+	<path
+		d="M12,24H4V17.9966L9,13l5.5859,5.5859L16,17.168l-5.5859-5.5855a2,2,0,0,0-2.8282,0L4,15.168V4H24v6h2V4a2.0023,2.0023,0,0,0-2-2H4A2.002,2.002,0,0,0,2,4V24a2.0023,2.0023,0,0,0,2,2h8Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..b57d90ac4597afba15027b2a2768a3e91c223bfb
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path d="M2 9h9V2H2zm2-5h5v3H4z" fill="currentColor" />
+	<path d="M2 19h9v-7H2zm2-5h5v3H4z" fill="currentColor" />
+	<path d="M2 29h9v-7H2zm2-5h5v3H4z" fill="currentColor" />
+	<path
+		d="M27 9h-9l3.41-3.59L20 4l-6 6l6 6l1.41-1.41L18 11h9a1 1 0 0 1 1 1v12a1 1 0 0 1-1 1H15v2h12a3 3 0 0 0 3-3V12a3 3 0 0 0-3-3z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte b/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..802dc243766e5d04db7624aa0cb49acd86b2400e
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path fill="currentColor" d="M18 10h2v2h-2zm-6 0h2v2h-2z" />
+	<path
+		fill="currentColor"
+		d="M26 20h-5v-2h1a2.002 2.002 0 0 0 2-2v-4h2v-2h-2V8a2.002 2.002 0 0 0-2-2h-2V2h-2v4h-4V2h-2v4h-2a2.002 2.002 0 0 0-2 2v2H6v2h2v4a2.002 2.002 0 0 0 2 2h1v2H6a2.002 2.002 0 0 0-2 2v8h2v-8h20v8h2v-8a2.002 2.002 0 0 0-2-2ZM10 8h12v8H10Zm3 10h6v2h-6Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconRobotics.svelte b/packages/widgets/src/lib/components/Icons/IconRobotics.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6f354638e8dd8a4bc20ecec7acab2785752f3f23
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconRobotics.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 12 12"
+>
+	<path
+		d="M2.41562 9.48284H4.48563L1.79462 5.04284C1.61912 4.76855 1.51562 4.46427 1.51562 4.1257C1.51603 3.70884 1.67594 3.30641 1.96543 2.99375C2.25491 2.68109 2.65413 2.47963 3.08834 2.42708C3.52255 2.37454 3.96198 2.47451 4.32437 2.70829C4.68676 2.94206 4.94727 3.29361 5.05712 3.69713H6.91562V2.6257C6.91562 2.15427 7.32062 1.76855 7.81562 1.76855V2.87855L8.53112 2.19713H10.5156V3.05427H8.90012L7.81562 4.08713V4.16427L8.90012 5.19713H10.5156V6.05427H8.53112L7.81562 5.37284V6.48284C7.57693 6.48284 7.34801 6.39253 7.17923 6.23179C7.01045 6.07104 6.91562 5.85303 6.91562 5.6257V4.55427H5.05712C5.01212 4.72141 4.94012 4.87998 4.85012 5.0257L7.55012 9.48284H9.61562C9.85432 9.48284 10.0832 9.57315 10.252 9.73389C10.4208 9.89464 10.5156 10.1127 10.5156 10.34H1.51562C1.51562 9.86855 1.92062 9.48284 2.41562 9.48284ZM4.17512 5.6257C3.92312 5.76284 3.63062 5.83998 3.31562 5.83998L5.52512 9.48284H6.51063L4.17512 5.6257ZM3.31562 3.26855C3.07693 3.26855 2.84801 3.35886 2.67923 3.51961C2.51045 3.68035 2.41562 3.89837 2.41562 4.1257C2.41562 4.60141 2.81612 4.98284 3.31562 4.98284C3.81512 4.98284 4.21562 4.60141 4.21562 4.1257C4.21562 3.89837 4.1208 3.68035 3.95202 3.51961C3.78324 3.35886 3.55432 3.26855 3.31562 3.26855Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconRow.svelte b/packages/widgets/src/lib/components/Icons/IconRow.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7710d2b52baaee24c2e54efca5bf799c91b969fa
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconRow.svelte
@@ -0,0 +1,16 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	viewBox="0 0 32 32"
+>
+	<path d="M3 11v2h26v-2H3zm0 8v2h26v-2H3z" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte b/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..def158c732d5c3c4b016202e8de56b4ad57210b2
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path d="M30 15H17V2h-2v13H2v2h13v13h2V17h13v-2z" />
+	<path d="M25.586 20L27 21.414L23.414 25L27 28.586L25.586 30l-5-5l5-5z" />
+	<path d="M11 30H3a1 1 0 0 1-.894-1.447l4-8a1.041 1.041 0 0 1 1.789 0l4 8A1 1 0 0 1 11 30zm-6.382-2h4.764L7 23.236z" />
+	<path
+		d="M28 12h-6a2.002 2.002 0 0 1-2-2V4a2.002 2.002 0 0 1 2-2h6a2.002 2.002 0 0 1 2 2v6a2.002 2.002 0 0 1-2 2zm-6-8v6h6.001L28 4z"
+	/>
+	<path d="M7 12a5 5 0 1 1 5-5a5.006 5.006 0 0 1-5 5zm0-8a3 3 0 1 0 3 3a3.003 3.003 0 0 0-3-3z" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconSpin.svelte b/packages/widgets/src/lib/components/Icons/IconSpin.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..2afa35d83977a3b7ac1a0e078f14291ae99897f3
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconSpin.svelte
@@ -0,0 +1,30 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="none"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 24 24"
+>
+	<g fill="none"
+		><path
+			opacity=".2"
+			fill-rule="evenodd"
+			clip-rule="evenodd"
+			d="M12 19a7 7 0 1 0 0-14a7 7 0 0 0 0 14zm0 3c5.523 0 10-4.477 10-10S17.523 2 12 2S2 6.477 2 12s4.477 10 10 10z"
+			fill="currentColor"
+		/><path d="M12 22c5.523 0 10-4.477 10-10h-3a7 7 0 0 1-7 7v3z" fill="currentColor" /><path
+			d="M2 12C2 6.477 6.477 2 12 2v3a7 7 0 0 0-7 7H2z"
+			fill="currentColor"
+		/></g
+	>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconSummarization.svelte b/packages/widgets/src/lib/components/Icons/IconSummarization.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..8c0ee2fc50d95e4ac11c7148f96e6dcb68f48886
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconSummarization.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 19"
+>
+	<path
+		d="M15.4988 8.79309L12.1819 5.47621C12.0188 5.25871 11.7469 5.14996 11.475 5.14996H7.12501C6.52688 5.14996 6.03751 5.63934 6.03751 6.23746V16.025C6.03751 16.6231 6.52688 17.1125 7.12501 17.1125H14.7375C15.3356 17.1125 15.825 16.6231 15.825 16.025V9.55434C15.825 9.28246 15.7163 9.01059 15.4988 8.79309V8.79309ZM11.475 6.23746L14.6831 9.49996H11.475V6.23746ZM7.12501 16.025V6.23746H10.3875V9.49996C10.3875 10.0981 10.8769 10.5875 11.475 10.5875H14.7375V16.025H7.12501Z"
+	/>
+	<path d="M3.8625 10.5875H2.775V2.97498C2.775 2.37686 3.26438 1.88748 3.8625 1.88748H11.475V2.97498H3.8625V10.5875Z" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..c11027345e504ce119d01a1799e75ee7ac2a6c96
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 19"
+>
+	<path
+		d="M15.825 1.88748H6.0375C5.74917 1.88777 5.47272 2.00244 5.26884 2.20632C5.06496 2.4102 4.95029 2.68665 4.95 2.97498V4.60623H2.775C2.48667 4.60652 2.21022 4.72119 2.00634 4.92507C1.80246 5.12895 1.68779 5.4054 1.6875 5.69373V16.025C1.68779 16.3133 1.80246 16.5898 2.00634 16.7936C2.21022 16.9975 2.48667 17.1122 2.775 17.1125H15.825C16.1133 17.1122 16.3898 16.9975 16.5937 16.7936C16.7975 16.5898 16.9122 16.3133 16.9125 16.025V2.97498C16.9122 2.68665 16.7975 2.4102 16.5937 2.20632C16.3898 2.00244 16.1133 1.88777 15.825 1.88748ZM6.0375 2.97498H15.825V4.60623H6.0375V2.97498ZM15.825 8.41248H11.475V5.69373H15.825V8.41248ZM6.0375 12.2187V9.49998H10.3875V12.2187H6.0375ZM10.3875 13.3062V16.025H6.0375V13.3062H10.3875ZM4.95 12.2187H2.775V9.49998H4.95V12.2187ZM10.3875 5.69373V8.41248H6.0375V5.69373H10.3875ZM11.475 9.49998H15.825V12.2187H11.475V9.49998ZM4.95 5.69373V8.41248H2.775V5.69373H4.95ZM2.775 13.3062H4.95V16.025H2.775V13.3062ZM11.475 16.025V13.3062H15.825V16.025H11.475Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d4ce233b18d18866b3fd0853151949309d95daac
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M29 5a2 2 0 0 0-2-2H5a2 2 0 0 0-2 2v22a2 2 0 0 0 2 2h22a2 2 0 0 0 2-2zm-2 0v4H5V5zm0 22H5v-4h22zm0-6H5v-4h22zm0-6H5v-4h22z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte b/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ed4355c5dc54a6480a3ac895b5be4326e7187e8e
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte
@@ -0,0 +1,20 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		fill="currentColor"
+		d="m4.67 28l6.39-12l7.3 6.49a2 2 0 0 0 1.7.47a2 2 0 0 0 1.42-1.07L27 10.9l-1.82-.9l-5.49 11l-7.3-6.49a2 2 0 0 0-1.68-.51a2 2 0 0 0-1.42 1L4 25V2H2v26a2 2 0 0 0 2 2h26v-2Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte b/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ed5a2c6c7acb3b8edca487cee7bdc69df5303acd
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte
@@ -0,0 +1,27 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		d="M4.00626 16.5125C3.46854 16.5125 2.9429 16.353 2.4958 16.0543C2.0487 15.7556 1.70024 15.3309 1.49446 14.8342C1.28868 14.3374 1.23484 13.7907 1.33975 13.2633C1.44465 12.7359 1.70359 12.2515 2.08381 11.8713C2.46403 11.4911 2.94847 11.2321 3.47586 11.1272C4.00324 11.0223 4.54989 11.0762 5.04668 11.2819C5.54346 11.4877 5.96807 11.8362 6.26681 12.2833C6.56555 12.7304 6.72501 13.256 6.72501 13.7937C6.72414 14.5145 6.43743 15.2055 5.92775 15.7152C5.41807 16.2249 4.72705 16.5116 4.00626 16.5125V16.5125ZM4.00626 12.1625C3.68363 12.1625 3.36824 12.2582 3.09998 12.4374C2.83173 12.6166 2.62264 12.8714 2.49918 13.1695C2.37571 13.4676 2.34341 13.7955 2.40635 14.112C2.46929 14.4284 2.62465 14.7191 2.85279 14.9472C3.08092 15.1753 3.37158 15.3307 3.68802 15.3936C4.00445 15.4566 4.33244 15.4243 4.63051 15.3008C4.92858 15.1773 5.18335 14.9683 5.36259 14.7C5.54184 14.4317 5.63751 14.1164 5.63751 13.7937C5.63708 13.3612 5.46507 12.9466 5.15925 12.6407C4.85342 12.3349 4.43876 12.1629 4.00626 12.1625Z"
+	/>
+	<path
+		d="M13.25 14.3375H7.81251V13.25H13.25V9.44371H4.55001C4.26167 9.44343 3.98523 9.32876 3.78135 9.12487C3.57747 8.92099 3.4628 8.64455 3.46251 8.35621V4.54996C3.4628 4.26163 3.57747 3.98519 3.78135 3.7813C3.98523 3.57742 4.26167 3.46275 4.55001 3.46246H9.98751V4.54996H4.55001V8.35621H13.25C13.5383 8.3565 13.8148 8.47117 14.0187 8.67505C14.2226 8.87894 14.3372 9.15538 14.3375 9.44371V13.25C14.3372 13.5383 14.2226 13.8147 14.0187 14.0186C13.8148 14.2225 13.5383 14.3372 13.25 14.3375V14.3375Z"
+	/>
+	<path
+		d="M15.425 6.72504H12.1625C11.8742 6.72475 11.5977 6.61008 11.3939 6.4062C11.19 6.20231 11.0753 5.92587 11.075 5.63754V2.37504C11.0753 2.0867 11.19 1.81026 11.3939 1.60638C11.5977 1.40249 11.8742 1.28782 12.1625 1.28754H15.425C15.7133 1.28782 15.9898 1.40249 16.1937 1.60638C16.3976 1.81026 16.5122 2.0867 16.5125 2.37504V5.63754C16.5122 5.92587 16.3976 6.20231 16.1937 6.4062C15.9898 6.61008 15.7133 6.72475 15.425 6.72504V6.72504ZM12.1625 2.37504V5.63754H15.425V2.37504H12.1625Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..e747e6d2d3e80ce3845c166eeca3d509acb1bd87
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte
@@ -0,0 +1,33 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<circle cx="10" cy="20" r="2" fill="currentColor" />
+	<circle cx="10" cy="28" r="2" fill="currentColor" />
+	<circle cx="10" cy="14" r="2" fill="currentColor" />
+	<circle cx="28" cy="4" r="2" fill="currentColor" />
+	<circle cx="22" cy="6" r="2" fill="currentColor" />
+	<circle cx="28" cy="10" r="2" fill="currentColor" />
+	<circle cx="20" cy="12" r="2" fill="currentColor" />
+	<circle cx="28" cy="22" r="2" fill="currentColor" />
+	<circle cx="26" cy="28" r="2" fill="currentColor" />
+	<circle cx="20" cy="26" r="2" fill="currentColor" />
+	<circle cx="22" cy="20" r="2" fill="currentColor" />
+	<circle cx="16" cy="4" r="2" fill="currentColor" />
+	<circle cx="4" cy="24" r="2" fill="currentColor" />
+	<circle cx="4" cy="16" r="2" fill="currentColor" />
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte b/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..fa0153e0d878f64b7cd3989beb606bdd5dc060b5
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		d="M16.2607 8.08202L14.468 6.28928C14.3063 6.12804 14.0873 6.03749 13.859 6.03749C13.6307 6.03749 13.4117 6.12804 13.25 6.28928L5.6375 13.904V16.9125H8.64607L16.2607 9.30002C16.422 9.13836 16.5125 8.91935 16.5125 8.69102C16.5125 8.4627 16.422 8.24369 16.2607 8.08202V8.08202ZM8.1953 15.825H6.725V14.3547L11.858 9.22118L13.3288 10.6915L8.1953 15.825ZM14.0982 9.92262L12.6279 8.45232L13.8606 7.21964L15.3309 8.68994L14.0982 9.92262Z"
+	/>
+	<path d="M6.18125 9.84373H7.26875V6.03748H8.9V4.94998H4.55V6.03748H6.18125V9.84373Z" />
+	<path
+		d="M4.55 11.475H2.375V2.775H11.075V4.95H12.1625V2.775C12.1625 2.48658 12.0479 2.20997 11.844 2.00602C11.64 1.80208 11.3634 1.6875 11.075 1.6875H2.375C2.08658 1.6875 1.80997 1.80208 1.60602 2.00602C1.40207 2.20997 1.2875 2.48658 1.2875 2.775V11.475C1.2875 11.7634 1.40207 12.04 1.60602 12.244C1.80997 12.4479 2.08658 12.5625 2.375 12.5625H4.55V11.475Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte b/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..a7adab86fbb37dd8786645150d2929359d2efe30
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte
@@ -0,0 +1,25 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		d="M29.537 13.76l-3.297-3.297a1.586 1.586 0 0 0-2.24 0L10 24.467V30h5.533l14.004-14a1.586 1.586 0 0 0 0-2.24zM14.704 28H12v-2.704l9.44-9.441l2.705 2.704zM25.56 17.145l-2.704-2.704l2.267-2.267l2.704 2.704z"
+		fill="currentColor"
+	/><path d="M11 17h2v-7h3V8H8v2h3v7z" fill="currentColor" /><path
+		d="M8 20H4V4h16v4h2V4a2 2 0 0 0-2-2H4a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h4z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte b/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7c533cfcb0d267a2ff38fc8ffdebf549cdecb419
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M3.0625 3.0625L10 3.0625V2H3.0625C2.78071 2 2.51045 2.11194 2.3112 2.3112C2.11194 2.51046 2 2.78071 2 3.0625V11.5625C2 11.8443 2.11194 12.1145 2.3112 12.3138C2.51045 12.5131 2.78071 12.625 3.0625 12.625H7V11.5625H3.0625L3.0625 3.0625ZM5.78125 9.96875H6.84375V6.25H8.4375V5.1875H4.1875V6.25H5.78125V9.96875ZM12.5 13C13.163 13 13.7989 12.7366 14.2678 12.2678C14.7366 11.7989 15 11.163 15 10.5V5.5C15 4.83696 14.7366 4.20107 14.2678 3.73223C13.7989 3.26339 13.163 3 12.5 3C11.837 3 11.2011 3.26339 10.7322 3.73223C10.2634 4.20107 10 4.83696 10 5.5V10.5C10 11.163 10.2634 11.7989 10.7322 12.2678C11.2011 12.7366 11.837 13 12.5 13ZM11 5.5C11 5.10218 11.158 4.72064 11.4393 4.43934C11.7206 4.15804 12.1022 4 12.5 4C12.8978 4 13.2794 4.15804 13.5607 4.43934C13.842 4.72064 14 5.10218 14 5.5V10.5C14 10.8978 13.842 11.2794 13.5607 11.5607C13.2794 11.842 12.8978 12 12.5 12C12.1022 12 11.7206 11.842 11.4393 11.5607C11.158 11.2794 11 10.8978 11 10.5V5.5ZM16 9V10.5C16 11.4283 15.6313 12.3185 14.9749 12.9749C14.3185 13.6313 13.4283 14 12.5 14C11.5717 14 10.6815 13.6313 10.0251 12.9749C9.36875 12.3185 9 11.4283 9 10.5V9H8V10.5C8.00053 11.6065 8.40873 12.6741 9.14661 13.4987C9.88449 14.3232 10.9003 14.8471 12 14.97V16H10V17H15V16H13V14.97C14.0997 14.8471 15.1155 14.3232 15.8534 13.4987C16.5913 12.6741 16.9995 11.6065 17 10.5V9H16Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte b/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3082ea2e673af93e3ca0b14fcde4c018334ad4c9
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		fill="currentColor"
+		fill-rule="evenodd"
+		d="M3.91 7.49v13.77h6.78v2H3.9c-1.1 0-2-.9-2-2V7.5c0-1.11.9-2.01 2-2.01h13.93c1.11 0 2.02.9 2.02 2.01v3.56h-2V7.5l-.02-.01H3.91Zm5.97 4.65H7.53v-2h6.7v2h-2.35v6.52h-2v-6.52Zm6.82 1.24a2.6 2.6 0 0 0-2.6 2.59v7.95a2.6 2.6 0 0 0 2.6 2.6h7.63a2.6 2.6 0 0 0 2.59-2.6v-.51l1.54 1.28a1 1 0 0 0 1.64-.77v-7.95a1 1 0 0 0-1.64-.77l-1.54 1.28v-.51a2.6 2.6 0 0 0-2.6-2.6H16.7Zm-.6 2.59a.6.6 0 0 1 .6-.6h7.63a.6.6 0 0 1 .59.6v2.65a1 1 0 0 0 1.64.77l1.54-1.29v3.69l-1.54-1.29a1 1 0 0 0-1.64.77v2.65a.6.6 0 0 1-.6.6H16.7a.6.6 0 0 1-.6-.6v-7.95Z"
+		clip-rule="evenodd"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..27f3d3625882a5bf62d6ca0b03e44d9b869360c0
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte
@@ -0,0 +1,33 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path d="M11.075 10.1875H12.1625V11.275H11.075V10.1875Z" />
+	<path d="M15.425 9.10004H16.5125V10.1875H15.425V9.10004Z" />
+	<path d="M7.8125 3.66254H8.9V4.75004H7.8125V3.66254Z" />
+	<path
+		d="M8.90001 12.3625H6.72501V9.09998C6.72472 8.81165 6.61005 8.5352 6.40617 8.33132C6.20228 8.12744 5.92584 8.01277 5.63751 8.01248H2.37501C2.08667 8.01277 1.81023 8.12744 1.60635 8.33132C1.40246 8.5352 1.28779 8.81165 1.28751 9.09998V12.3625C1.28779 12.6508 1.40246 12.9273 1.60635 13.1311C1.81023 13.335 2.08667 13.4497 2.37501 13.45H5.63751V15.625C5.63779 15.9133 5.75246 16.1898 5.95635 16.3936C6.16023 16.5975 6.43667 16.7122 6.72501 16.7125H8.90001C9.18834 16.7122 9.46478 16.5975 9.66867 16.3936C9.87255 16.1898 9.98722 15.9133 9.98751 15.625V13.45C9.98722 13.1616 9.87255 12.8852 9.66867 12.6813C9.46478 12.4774 9.18834 12.3628 8.90001 12.3625V12.3625ZM2.37501 12.3625V9.09998H5.63751V12.3625H2.37501ZM6.72501 15.625V13.45H8.90001V15.625H6.72501Z"
+	/>
+	<path
+		d="M15.425 16.7125H13.25C12.9617 16.7122 12.6852 16.5976 12.4813 16.3937C12.2775 16.1898 12.1628 15.9134 12.1625 15.625V13.45C12.1628 13.1617 12.2775 12.8852 12.4813 12.6814C12.6852 12.4775 12.9617 12.3628 13.25 12.3625H15.425C15.7133 12.3628 15.9898 12.4775 16.1937 12.6814C16.3976 12.8852 16.5122 13.1617 16.5125 13.45V15.625C16.5122 15.9134 16.3976 16.1898 16.1937 16.3937C15.9898 16.5976 15.7133 16.7122 15.425 16.7125ZM13.25 13.45V15.625H15.425V13.45H13.25Z"
+	/>
+	<path
+		d="M15.425 1.48752H12.1625C11.8742 1.48781 11.5977 1.60247 11.3938 1.80636C11.19 2.01024 11.0753 2.28668 11.075 2.57502V5.83752H9.98751C9.69917 5.83781 9.42273 5.95247 9.21885 6.15636C9.01496 6.36024 8.9003 6.63668 8.90001 6.92502V8.01252C8.9003 8.30085 9.01496 8.5773 9.21885 8.78118C9.42273 8.98506 9.69917 9.09973 9.98751 9.10002H11.075C11.3633 9.09973 11.6398 8.98506 11.8437 8.78118C12.0476 8.5773 12.1622 8.30085 12.1625 8.01252V6.92502H15.425C15.7133 6.92473 15.9898 6.81006 16.1937 6.60618C16.3976 6.4023 16.5122 6.12585 16.5125 5.83752V2.57502C16.5122 2.28668 16.3976 2.01024 16.1937 1.80636C15.9898 1.60247 15.7133 1.48781 15.425 1.48752ZM9.98751 8.01252V6.92502H11.075V8.01252H9.98751ZM12.1625 5.83752V2.57502H15.425V5.83752H12.1625Z"
+	/>
+	<path
+		d="M4.55001 5.83752H2.37501C2.08667 5.83723 1.81023 5.72256 1.60635 5.51868C1.40246 5.3148 1.28779 5.03835 1.28751 4.75002V2.57502C1.28779 2.28668 1.40246 2.01024 1.60635 1.80636C1.81023 1.60247 2.08667 1.48781 2.37501 1.48752H4.55001C4.83834 1.48781 5.11478 1.60247 5.31867 1.80636C5.52255 2.01024 5.63722 2.28668 5.63751 2.57502V4.75002C5.63722 5.03835 5.52255 5.3148 5.31867 5.51868C5.11478 5.72256 4.83834 5.83723 4.55001 5.83752V5.83752ZM2.37501 2.57502V4.75002H4.55001V2.57502H2.37501Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconTranslation.svelte b/packages/widgets/src/lib/components/Icons/IconTranslation.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..c0d3444206f9a6c97bc9a79cd23fbdb73dc89df1
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconTranslation.svelte
@@ -0,0 +1,24 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		d="M15.7435 16.3688H16.9125L13.65 8.21251H12.3722L9.1097 16.3688H10.2788L11.1488 14.1938H14.8735L15.7435 16.3688ZM11.5838 13.1063L13.0084 9.53926L14.4385 13.1063H11.5838Z"
+	/>
+	<path
+		d="M10.3875 4.40625V3.31875H6.58125V1.6875H5.49375V3.31875H1.6875V4.40625H7.52737C7.2261 5.64892 6.63129 6.80125 5.79281 7.76663C5.24624 7.08884 4.8246 6.31923 4.54763 5.49375H3.40575C3.74803 6.60116 4.30202 7.63159 5.037 8.52787C4.2247 9.3158 3.27338 9.94633 2.23125 10.3875L2.63906 11.3989C3.81007 10.9044 4.87658 10.1922 5.78194 9.3C6.67088 10.2044 7.73719 10.9153 8.91394 11.388L9.3 10.3875C8.25187 9.98235 7.3026 9.35754 6.516 8.55506C7.55705 7.36858 8.2892 5.94351 8.6475 4.40625H10.3875Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte b/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..880d6ae9c6b505ed592c307ec9f206c5549bcdce
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path fill="currentColor" d="M19 14a3 3 0 1 0-3-3a3 3 0 0 0 3 3Zm0-4a1 1 0 1 1-1 1a1 1 0 0 1 1-1Z" /><path
+		fill="currentColor"
+		d="M26 4H6a2 2 0 0 0-2 2v20a2 2 0 0 0 2 2h20a2 2 0 0 0 2-2V6a2 2 0 0 0-2-2Zm0 22H6v-6l5-5l5.59 5.59a2 2 0 0 0 2.82 0L21 19l5 5Zm0-4.83l-3.59-3.59a2 2 0 0 0-2.82 0L18 19.17l-5.59-5.59a2 2 0 0 0-2.82 0L6 17.17V6h20Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte b/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..669a372caa8710b7ace03fba1f1cd5b599dacb6d
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte
@@ -0,0 +1,24 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 32 32"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M21 26H16V24H21V18C21.0008 17.8168 21.052 17.6373 21.1479 17.4812C21.2438 17.3251 21.3807 17.1983 21.5438 17.1147C21.7068 17.0311 21.8897 16.994 22.0724 17.0072C22.2552 17.0205 22.4307 17.0837 22.58 17.19L28 21.06V10.94L22.58 14.81C22.4307 14.9163 22.2552 14.9795 22.0724 14.9928C21.8897 15.006 21.7068 14.9689 21.5438 14.8853C21.3807 14.8017 21.2438 14.6749 21.1479 14.5188C21.052 14.3627 21.0008 14.1832 21 14V8H4V16H2V8C2 7.46957 2.21071 6.96086 2.58579 6.58579C2.96086 6.21071 3.46957 6 4 6H21C21.5304 6 22.0391 6.21071 22.4142 6.58579C22.7893 6.96086 23 7.46957 23 8V12.06L28.42 8.19C28.5693 8.08373 28.7448 8.02051 28.9276 8.00724C29.1103 7.99396 29.2932 8.03115 29.4562 8.11473C29.6193 8.19832 29.7562 8.32509 29.8521 8.48121C29.948 8.63732 29.9992 8.81678 30 9V23C29.9992 23.1832 29.948 23.3627 29.8521 23.5188C29.7562 23.6749 29.6193 23.8017 29.4562 23.8853C29.2932 23.9689 29.1103 24.006 28.9276 23.9928C28.7448 23.9795 28.5693 23.9163 28.42 23.81L23 19.94V24C23 24.5304 22.7893 25.0391 22.4142 25.4142C22.0391 25.7893 21.5304 26 21 26ZM4.66667 19.3333V17.3333H12.6667V25.3333H10.6667V20.7473L4.08067 27.3333L2.66667 25.9193L9.25267 19.3333H4.66667Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte b/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..c896fee502d47582dc6c5815f7f7ed71dd783563
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte
@@ -0,0 +1,23 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		fill-rule="evenodd"
+		clip-rule="evenodd"
+		d="M6.34483 1.96552C5.1929 1.9668 4.08852 2.42496 3.27398 3.2395C2.45945 4.05404 2.00128 5.15842 2 6.31035H2.96552C2.96552 5.4141 3.32155 4.55456 3.95529 3.92081C4.58904 3.28707 5.44858 2.93103 6.34483 2.93103C7.24108 2.93103 8.10062 3.28707 8.73436 3.92081C9.3681 4.55456 9.72414 5.4141 9.72414 6.31035C9.72587 6.90867 9.57072 7.497 9.27416 8.01667C8.97761 8.53634 8.55001 8.96919 8.034 9.27207L7.7931 9.4111V10.8946C7.79547 11.0882 7.75813 11.2802 7.68341 11.4588C7.60868 11.6374 7.49814 11.7988 7.35862 11.933C7.08012 12.2328 6.7171 12.4409 6.31768 12.5297C5.91826 12.6185 5.50128 12.5838 5.122 12.4303C4.77411 12.2798 4.47685 12.0325 4.26568 11.7177C4.0545 11.403 3.93834 11.0341 3.93103 10.6552H2.96552C2.97422 11.222 3.14591 11.7743 3.46009 12.2462C3.77426 12.718 4.21764 13.0895 4.73724 13.3161C5.08947 13.4705 5.46979 13.5503 5.85435 13.5508C6.66479 13.5337 7.43811 13.2076 8.01614 12.6393C8.25262 12.4147 8.44052 12.144 8.56823 11.8439C8.69593 11.5438 8.76073 11.2207 8.75862 10.8946V9.9571C9.35428 9.55362 9.84183 9.01013 10.1785 8.37432C10.5152 7.73851 10.6907 7.02979 10.6897 6.31035C10.6884 5.15842 10.2302 4.05404 9.41567 3.2395C8.60114 2.42496 7.49676 1.9668 6.34483 1.96552ZM10.2069 15.4828V14.5172C11.4868 14.5158 12.7139 14.0068 13.619 13.1017C14.524 12.1967 15.0331 10.9696 15.0345 9.68966H16C15.9983 11.2256 15.3875 12.6981 14.3014 13.7842C13.2153 14.8702 11.7428 15.4811 10.2069 15.4828ZM10.2069 13.5517V12.5862C10.9748 12.5853 11.7111 12.2799 12.2541 11.7368C12.7971 11.1938 13.1026 10.4576 13.1034 9.68966H14.069C14.0677 10.7135 13.6604 11.6951 12.9364 12.4191C12.2124 13.1431 11.2308 13.5504 10.2069 13.5517ZM10.2069 10.6552V11.6207C10.7189 11.6202 11.2098 11.4166 11.5718 11.0545C11.9338 10.6925 12.1374 10.2016 12.1379 9.68966H11.1724C11.1722 9.94565 11.0704 10.1911 10.8893 10.3721C10.7083 10.5531 10.4629 10.6549 10.2069 10.6552ZM6.64823 4.89281C6.43337 4.84642 6.21077 4.84944 5.99724 4.90165C5.71804 4.9704 5.46559 5.12064 5.27203 5.33328C5.07846 5.54591 4.95252 5.81132 4.91024 6.09573C4.86795 6.38015 4.91122 6.67072 5.03456 6.93047C5.15789 7.19022 5.3557 7.40741 5.60283 7.55441C5.97226 7.7669 6.27967 8.07234 6.49452 8.44039C6.70938 8.80845 6.8242 9.22631 6.82759 9.65248V10.6552H5.86207V9.65296C5.85793 9.39379 5.78607 9.1402 5.65362 8.91739C5.52117 8.69458 5.33274 8.51029 5.10703 8.38282C4.66676 8.11775 4.3219 7.71987 4.12204 7.24641C3.92217 6.77295 3.87761 6.24831 3.99476 5.74793C4.09577 5.31261 4.31683 4.91433 4.63282 4.59834C4.94882 4.28234 5.34709 4.06128 5.78241 3.96027C6.13945 3.87659 6.51077 3.87468 6.86865 3.95468C7.22652 4.03469 7.56169 4.19453 7.8491 4.42227C8.13342 4.64789 8.36295 4.93497 8.52047 5.26196C8.67799 5.58896 8.75941 5.94738 8.75862 6.31034H7.7931C7.79296 6.09052 7.74288 5.87361 7.64664 5.67598C7.55041 5.47834 7.41053 5.30515 7.23757 5.16949C7.06462 5.03382 6.8631 4.93921 6.64823 4.89281Z"
+		fill="currentColor"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte b/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ebbe940ef2409569fd2e58573e00d7e7e5193de6
--- /dev/null
+++ b/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte
@@ -0,0 +1,51 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg
+	class={classNames}
+	xmlns="http://www.w3.org/2000/svg"
+	xmlns:xlink="http://www.w3.org/1999/xlink"
+	aria-hidden="true"
+	fill="currentColor"
+	focusable="false"
+	role="img"
+	width="1em"
+	height="1em"
+	preserveAspectRatio="xMidYMid meet"
+	viewBox="0 0 18 18"
+>
+	<path
+		d="M16.7125 8.75625H9.64375V1.6875H8.55625V8.75625H1.4875V9.84375H8.55625V16.9125H9.64375V9.84375H16.7125V8.75625Z"
+	/>
+	<path
+		d="M3.11875 16.9125C2.79612 16.9125 2.48073 16.8168 2.21247 16.6376C1.94421 16.4584 1.73513 16.2036 1.61167 15.9055C1.4882 15.6074 1.4559 15.2794 1.51884 14.963C1.58178 14.6466 1.73714 14.3559 1.96528 14.1278C2.19341 13.8997 2.48407 13.7443 2.8005 13.6814C3.11694 13.6184 3.44493 13.6507 3.743 13.7742C4.04107 13.8976 4.29584 14.1067 4.47508 14.375C4.65432 14.6432 4.75 14.9586 4.75 15.2813C4.74956 15.7138 4.57756 16.1284 4.27174 16.4343C3.96591 16.7401 3.55125 16.9121 3.11875 16.9125V16.9125ZM3.11875 14.7375C3.0112 14.7375 2.90607 14.7694 2.81665 14.8291C2.72724 14.8889 2.65754 14.9738 2.61639 15.0732C2.57523 15.1725 2.56446 15.2819 2.58544 15.3873C2.60642 15.4928 2.65821 15.5897 2.73426 15.6657C2.8103 15.7418 2.90719 15.7936 3.01267 15.8146C3.11814 15.8355 3.22747 15.8248 3.32683 15.7836C3.42619 15.7425 3.51111 15.6728 3.57086 15.5834C3.63061 15.4939 3.6625 15.3888 3.6625 15.2813C3.66235 15.1371 3.60502 14.9989 3.50308 14.8969C3.40113 14.795 3.26291 14.7377 3.11875 14.7375Z"
+	/>
+	<path
+		d="M4.75 4.95C4.42737 4.95 4.11198 4.85433 3.84372 4.67509C3.57547 4.49584 3.36639 4.24107 3.24292 3.943C3.11945 3.64493 3.08715 3.31694 3.15009 3.00051C3.21303 2.68408 3.3684 2.39342 3.59653 2.16528C3.82466 1.93715 4.11533 1.78179 4.43176 1.71884C4.74819 1.6559 5.07618 1.68821 5.37425 1.81167C5.67232 1.93514 5.92709 2.14422 6.10633 2.41248C6.28558 2.68073 6.38125 2.99612 6.38125 3.31875C6.38082 3.75125 6.20881 4.16592 5.90299 4.47174C5.59716 4.77757 5.1825 4.94957 4.75 4.95ZM4.75 2.775C4.64245 2.775 4.53733 2.80689 4.44791 2.86664C4.35849 2.92639 4.28879 3.01131 4.24764 3.11067C4.20648 3.21002 4.19572 3.31935 4.2167 3.42483C4.23768 3.53031 4.28946 3.62719 4.36551 3.70324C4.44155 3.77928 4.53844 3.83107 4.64392 3.85205C4.7494 3.87303 4.85873 3.86227 4.95808 3.82111C5.05744 3.77995 5.14236 3.71026 5.20211 3.62084C5.26186 3.53142 5.29375 3.42629 5.29375 3.31875C5.2936 3.17458 5.23627 3.03636 5.13433 2.93442C5.03239 2.83248 4.89417 2.77514 4.75 2.775Z"
+	/>
+	<path
+		d="M12.3625 7.66875C12.0399 7.66875 11.7245 7.57308 11.4562 7.39384C11.188 7.21459 10.9789 6.95982 10.8554 6.66175C10.732 6.36368 10.6996 6.03569 10.7626 5.71926C10.8255 5.40283 10.9809 5.11217 11.209 4.88403C11.4372 4.6559 11.7278 4.50054 12.0443 4.43759C12.3607 4.37465 12.6887 4.40696 12.9867 4.53042C13.2848 4.65389 13.5396 4.86297 13.7188 5.13123C13.8981 5.39948 13.9937 5.71487 13.9937 6.0375C13.9933 6.47 13.8213 6.88467 13.5155 7.19049C13.2097 7.49632 12.795 7.66832 12.3625 7.66875ZM12.3625 5.49375C12.255 5.49375 12.1498 5.52564 12.0604 5.58539C11.971 5.64514 11.9013 5.73006 11.8601 5.82942C11.819 5.92877 11.8082 6.0381 11.8292 6.14358C11.8502 6.24906 11.902 6.34595 11.978 6.42199C12.0541 6.49803 12.1509 6.54982 12.2564 6.5708C12.3619 6.59178 12.4712 6.58102 12.5706 6.53986C12.6699 6.4987 12.7549 6.42901 12.8146 6.33959C12.8744 6.25017 12.9062 6.14504 12.9062 6.0375C12.9061 5.89333 12.8488 5.75511 12.7468 5.65317C12.6449 5.55123 12.5067 5.49389 12.3625 5.49375Z"
+	/>
+	<path
+		d="M6.38125 7.66876C6.98186 7.66876 7.46875 7.18187 7.46875 6.58126C7.46875 5.98065 6.98186 5.49376 6.38125 5.49376C5.78064 5.49376 5.29375 5.98065 5.29375 6.58126C5.29375 7.18187 5.78064 7.66876 6.38125 7.66876Z"
+	/>
+	<path
+		d="M6.38125 13.1063C6.98186 13.1063 7.46875 12.6194 7.46875 12.0188C7.46875 11.4181 6.98186 10.9313 6.38125 10.9313C5.78064 10.9313 5.29375 11.4181 5.29375 12.0188C5.29375 12.6194 5.78064 13.1063 6.38125 13.1063Z"
+	/>
+	<path
+		d="M11.8187 13.1063C12.4194 13.1063 12.9062 12.6194 12.9062 12.0188C12.9062 11.4181 12.4194 10.9313 11.8187 10.9313C11.2181 10.9313 10.7312 11.4181 10.7312 12.0188C10.7312 12.6194 11.2181 13.1063 11.8187 13.1063Z"
+	/>
+	<path
+		d="M12.3625 16.9125C12.9631 16.9125 13.45 16.4256 13.45 15.825C13.45 15.2244 12.9631 14.7375 12.3625 14.7375C11.7619 14.7375 11.275 15.2244 11.275 15.825C11.275 16.4256 11.7619 16.9125 12.3625 16.9125Z"
+	/>
+	<path
+		d="M15.625 14.7375C16.2256 14.7375 16.7125 14.2506 16.7125 13.65C16.7125 13.0494 16.2256 12.5625 15.625 12.5625C15.0244 12.5625 14.5375 13.0494 14.5375 13.65C14.5375 14.2506 15.0244 14.7375 15.625 14.7375Z"
+	/>
+	<path
+		d="M2.575 7.66876C3.17561 7.66876 3.6625 7.18187 3.6625 6.58126C3.6625 5.98065 3.17561 5.49376 2.575 5.49376C1.97439 5.49376 1.4875 5.98065 1.4875 6.58126C1.4875 7.18187 1.97439 7.66876 2.575 7.66876Z"
+	/>
+	<path
+		d="M15.625 3.8625C16.2256 3.8625 16.7125 3.37561 16.7125 2.775C16.7125 2.17439 16.2256 1.6875 15.625 1.6875C15.0244 1.6875 14.5375 2.17439 14.5375 2.775C14.5375 3.37561 15.0244 3.8625 15.625 3.8625Z"
+	/>
+</svg>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..faf06bf8ee498e07fbd4721396535c0d710d9872
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte
@@ -0,0 +1,100 @@
+<script lang="ts">
+	import type { SvelteComponent } from "svelte";
+	import type { WidgetProps } from "./shared/types.js";
+
+	import AudioClassificationWidget from "./widgets/AudioClassificationWidget/AudioClassificationWidget.svelte";
+	import AudioToAudioWidget from "./widgets/AudioToAudioWidget/AudioToAudioWidget.svelte";
+	import AutomaticSpeechRecognitionWidget from "./widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte";
+	import ConversationalWidget from "./widgets/ConversationalWidget/ConversationalWidget.svelte";
+	import FeatureExtractionWidget from "./widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte";
+	import FillMaskWidget from "./widgets/FillMaskWidget/FillMaskWidget.svelte";
+	import ImageClassificationWidget from "./widgets/ImageClassificationWidget/ImageClassificationWidget.svelte";
+	import ImageSegmentationWidget from "./widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte";
+	import ImageToImageWidget from "./widgets/ImageToImageWidget/ImageToImageWidget.svelte";
+	import ImageToTextWidget from "./widgets/ImageToTextWidget/ImageToTextWidget.svelte";
+	import ObjectDetectionWidget from "./widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte";
+	import QuestionAnsweringWidget from "./widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte";
+	import ReinforcementLearningWidget from "./widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte";
+	import SentenceSimilarityWidget from "./widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte";
+	import SummarizationWidget from "./widgets/SummarizationWidget/SummarizationWidget.svelte";
+	import TableQuestionAnsweringWidget from "./widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte";
+	import TabularDataWidget from "./widgets/TabularDataWidget/TabularDataWidget.svelte";
+	import TextGenerationWidget from "./widgets/TextGenerationWidget/TextGenerationWidget.svelte";
+	import TextToImageWidget from "./widgets/TextToImageWidget/TextToImageWidget.svelte";
+	import TextToSpeechWidget from "./widgets/TextToSpeechWidget/TextToSpeechWidget.svelte";
+	import TokenClassificationWidget from "./widgets/TokenClassificationWidget/TokenClassificationWidget.svelte";
+	import VisualQuestionAnsweringWidget from "./widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte";
+	import ZeroShotClassificationWidget from "./widgets/ZeroShowClassificationWidget/ZeroShotClassificationWidget.svelte";
+	import ZeroShotImageClassificationWidget from "./widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte";
+	import type { PipelineType } from "@huggingface/tasks";
+
+	export let apiToken: WidgetProps["apiToken"] = undefined;
+	export let callApiOnMount = false;
+	export let apiUrl = "https://api-inference.huggingface.co";
+	export let model: WidgetProps["model"];
+	export let noTitle = false;
+	export let shouldUpdateUrl = false;
+	export let includeCredentials = false;
+	export let isLoggedIn = false;
+
+	// Note: text2text-generation, text-generation, and translation all
+	// use the TextGenerationWidget as they work almost the same way.
+	// The same goes for fill-mask and text-classification.
+	// In the future it may be useful (and easier to maintain) to create
+	// a single dedicated widget for each pipeline type.
+	const WIDGET_COMPONENTS: {
+		[key in PipelineType]?: typeof SvelteComponent;
+	} = {
+		"audio-to-audio": AudioToAudioWidget,
+		"audio-classification": AudioClassificationWidget,
+		"automatic-speech-recognition": AutomaticSpeechRecognitionWidget,
+		conversational: ConversationalWidget,
+		"feature-extraction": FeatureExtractionWidget,
+		"fill-mask": FillMaskWidget,
+		"image-classification": ImageClassificationWidget,
+		"image-to-image": ImageToImageWidget,
+		"image-to-text": ImageToTextWidget,
+		"image-segmentation": ImageSegmentationWidget,
+		"object-detection": ObjectDetectionWidget,
+		"question-answering": QuestionAnsweringWidget,
+		"sentence-similarity": SentenceSimilarityWidget,
+		summarization: SummarizationWidget,
+		"table-question-answering": TableQuestionAnsweringWidget,
+		"text2text-generation": TextGenerationWidget,
+		"text-classification": FillMaskWidget,
+		"text-generation": TextGenerationWidget,
+		"token-classification": TokenClassificationWidget,
+		"text-to-image": TextToImageWidget,
+		"text-to-speech": TextToSpeechWidget,
+		"text-to-audio": TextToSpeechWidget,
+		translation: TextGenerationWidget,
+		"tabular-classification": TabularDataWidget,
+		"tabular-regression": TabularDataWidget,
+		"visual-question-answering": VisualQuestionAnsweringWidget,
+		"reinforcement-learning": ReinforcementLearningWidget,
+		"zero-shot-classification": ZeroShotClassificationWidget,
+		"document-question-answering": VisualQuestionAnsweringWidget,
+		"zero-shot-image-classification": ZeroShotImageClassificationWidget,
+	};
+
+	$: widgetComponent =
+		model.pipeline_tag && model.pipeline_tag in WIDGET_COMPONENTS
+			? WIDGET_COMPONENTS[model.pipeline_tag as keyof typeof WIDGET_COMPONENTS]
+			: undefined;
+
+	// prettier-ignore
+	$: widgetProps = ({
+		apiToken,
+		apiUrl,
+		callApiOnMount,
+		model,
+		noTitle,
+		shouldUpdateUrl,
+		includeCredentials,
+		isLoggedIn,
+	}) as WidgetProps;
+</script>
+
+{#if widgetComponent}
+	<svelte:component this={widgetComponent} {...widgetProps} />
+{/if}
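`InferenceWidget` resolves the widget component purely from `model.pipeline_tag`, so a consumer only passes a model object plus optional API settings; a missing or unknown pipeline type simply renders nothing. A minimal consumer sketch, assuming a local import path and a hypothetical two-field model object (the real `ModelData` type from `@huggingface/tasks` carries more fields):

```svelte
<script lang="ts">
	import type { ModelData } from "@huggingface/tasks";
	import InferenceWidget from "$lib/components/InferenceWidget/InferenceWidget.svelte";

	// Hypothetical minimal model object: pipeline_tag picks the widget,
	// id identifies the model for Inference API calls.
	const model = { id: "gpt2", pipeline_tag: "text-generation" } as ModelData;
</script>

<!-- Renders TextGenerationWidget; an unrecognized pipeline_tag renders nothing. -->
<InferenceWidget {model} />
```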
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7eca76ae45429333b59d3ea234b80862bb6359a4
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte
@@ -0,0 +1,12 @@
+<script lang="ts">
+	export let isDisabled = false;
+	export let label = "Add Sentence";
+	export let onClick: (e: MouseEvent) => void;
+</script>
+
+{#if !isDisabled}
+	<button class="btn-widget h-10 w-full px-5" disabled={isDisabled} on:click|preventDefault={onClick} type="submit">
+		{label}
+	</button>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..b17f29a561a142068eb9197951ee65ad55c5abcb
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte
@@ -0,0 +1,17 @@
+<script lang="ts">
+	export let autoplay = false;
+	export let classNames = "";
+	export let controls = true;
+	export let label = "";
+	export let src: string | undefined = undefined;
+</script>
+
+<div class={classNames}>
+	{#if $$slots.default}
+		<slot />
+	{:else if label.length}
+		<div class="mb-1.5 truncate text-sm text-gray-500">{label}</div>
+	{/if}
+	<!-- svelte-ignore a11y-media-has-caption -->
+	<audio {autoplay} {controls} {src} />
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..19d6306456089fc7f46fe928109ef876b1037965
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte
@@ -0,0 +1,55 @@
+<script lang="ts">
+	let isPromptTipOpen = false;
+	export let decodingStrategy: "sampling" | "greedy" = "sampling";
+	$: isSampling = decodingStrategy === "sampling";
+	$: description =
+		decodingStrategy === "sampling"
+			? 'Switch to "greedy" for more accurate completions, e.g. math/history/translations (but they may be repetitive/less inventive)'
+			: 'Switch to "sampling" for more imaginative completions, e.g. story/poetry (but they may be less accurate)';
+
+	function toggleState() {
+		decodingStrategy = decodingStrategy === "sampling" ? "greedy" : "sampling";
+	}
+</script>
+
+<svelte:window on:click={() => (isPromptTipOpen = false)} />
+
+<div>
+	<div class="flex w-full justify-between">
+		<div class="flex items-center gap-x-2">
+			<span class="transition-opacity {isSampling ? 'opacity-80' : 'opacity-40'}">sampling</span>
+			<!-- svelte-ignore a11y-click-events-have-key-events -->
+			<div class="cursor-pointer" on:click={toggleState}>
+				<div class="relative h-2 w-8 rounded-full border-2 border-blue-200 dark:border-blue-800">
+					<div
+						class="absolute -mt-1.5 h-4 w-4 rounded-full bg-blue-400 transition-transform dark:bg-blue-600 {!isSampling
+							? 'translate-x-3.5'
+							: '-translate-x-0.5'}"
+					/>
+				</div>
+			</div>
+			<span class="transition-opacity {!isSampling ? 'opacity-80' : 'opacity-40'}">greedy</span>
+		</div>
+		<div class="relative">
+			<!-- svelte-ignore a11y-click-events-have-key-events -->
+			<span class="cursor-pointer text-xs" on:click|stopPropagation={() => (isPromptTipOpen = true)}
+				>ⓘ <span class="underline">BLOOM prompting tips</span></span
+			>
+			{#if isPromptTipOpen}
+				<div
+					class="absolute right-0 z-10 w-56 rounded bg-gray-100 p-2 text-xs text-gray-700 dark:bg-gray-800 dark:text-gray-300"
+				>
+					A good prompt: do NOT talk to BLOOM as if it were an entity; it is not a chatbot but a webpage/blog/article
+					completion model. For the best behaviour, MIMIC a few words of a webpage similar to the content you want to
+					generate. Start a sentence as if YOU were writing a blog, webpage, math post, or coding article, and BLOOM
+					will generate a coherent follow-up.
+				</div>
+			{/if}
+		</div>
+	</div>
+	<p
+		class="my-1 rounded border border-gray-200 bg-gray-100 py-0.5 px-1.5 leading-none text-gray-700 opacity-70 dark:bg-gray-800 dark:text-gray-300"
+	>
+		{description}
+	</p>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..e4e975e81f262640d72009967cb58f912b44549f
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte
@@ -0,0 +1,17 @@
+<script lang="ts">
+	import WidgetLabel from "../WidgetLabel/WidgetLabel.svelte";
+
+	export let classNames = "";
+	export let checked: boolean;
+	export let label: string;
+</script>
+
+<WidgetLabel classNames="inline-flex items-center {classNames}" {label}>
+	<svelte:fragment slot="before">
+		<input
+			bind:checked
+			class="mr-2 cursor-pointer rounded border-transparent bg-gray-200 text-blue-500 checked:bg-blue-500 focus:ring-1 focus:ring-blue-200 focus:ring-offset-2 dark:bg-gray-700 dark:checked:bg-blue-500 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-925"
+			type="checkbox"
+		/>
+	</svelte:fragment>
+</WidgetLabel>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3cfb0f007df2465df58dca8dcf24ca1b7e7543b9
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte
@@ -0,0 +1,92 @@
+<script lang="ts">
+	import IconSpin from "$lib/components/Icons/IconSpin.svelte";
+	import { getBlobFromUrl } from "../../shared/helpers.js";
+
+	export let accept = "image/*";
+	export let classNames = "";
+	export let isLoading = false;
+	export let isDisabled = false;
+	export let imgSrc = "";
+	export let label = "Drag image file here or click to browse from your device";
+	export let onSelectFile: (file: File | Blob) => void;
+	export let onError: (e: string) => void;
+
+	let fileInput: HTMLInputElement;
+	let isDragging = false;
+
+	function onChange() {
+		const file = fileInput.files?.[0];
+		if (file) {
+			onSelectFile(file);
+		}
+	}
+
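+	// A drop can carry either a URL (e.g. an image dragged in from another browser tab),
+	// which is fetched into a Blob, or an actual file, which is passed through directly;
+	// anything else is reported as an error.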
+	async function onDrop(e: DragEvent) {
+		isDragging = false;
+		const itemList = e.dataTransfer?.items;
+		if (!itemList || isLoading) {
+			return;
+		}
+		const items = Array.from(itemList);
+		const uriItem = items.find((x) => x.kind === "string" && x.type === "text/uri-list");
+		const fileItem = items.find((x) => x.kind === "file");
+		if (uriItem) {
+			const url = await new Promise<string>((resolve) => uriItem.getAsString((s) => resolve(s)));
+			const file = await getBlobFromUrl(url);
+
+			onSelectFile(file);
+		} else if (fileItem) {
+			const file = fileItem.getAsFile();
+			if (file) {
+				onSelectFile(file);
+			}
+		} else {
+			onError("Unrecognized dragged-and-dropped file or element.");
+		}
+	}
+</script>
+
+<input
+	{accept}
+	bind:this={fileInput}
+	on:change={onChange}
+	disabled={isLoading || isDisabled}
+	style="display: none;"
+	type="file"
+/>
+<!-- svelte-ignore a11y-click-events-have-key-events -->
+<div
+	class="relative cursor-pointer rounded border-2 border-dashed px-3 py-7 text-center
+		{isDisabled ? 'pointer-events-none' : ''}
+		{isDragging ? 'border-green-300 bg-green-50 text-green-500' : 'text-gray-500'}
+		{classNames}"
+	on:click={() => {
+		fileInput.click();
+	}}
+	on:dragenter={() => {
+		isDragging = true;
+	}}
+	on:dragleave={() => {
+		isDragging = false;
+	}}
+	on:dragover|preventDefault
+	on:drop|preventDefault={onDrop}
+>
+	{#if !imgSrc && !isDisabled}
+		<span class="pointer-events-none text-sm">{label}</span>
+	{:else}
+		<div class={isDragging ? "pointer-events-none" : ""}>
+			<slot />
+		</div>
+	{/if}
+	{#if isLoading}
+		<div
+			class="absolute top-1/2 left-1/2 flex h-12 w-12 -translate-x-1/2 -translate-y-1/2 transform items-center justify-center rounded-full border border-gray-100 bg-white shadow"
+		>
+			<IconSpin classNames="text-purple-500 animate-spin h-6 w-6" />
+		</div>
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6428e7b0a429fd5024b50a034366a31a683db5c1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts
@@ -0,0 +1,123 @@
+type TableData = Record<string, (string | number)[]>;
+
+//#region outputs
+export type WidgetExampleOutputLabels = Array<{ label: string; score: number }>;
+export interface WidgetExampleOutputAnswerScore {
+	answer: string;
+	score: number;
+}
+export interface WidgetExampleOutputText {
+	text: string;
+}
+export interface WidgetExampleOutputUrl {
+	url: string;
+}
+
+export type WidgetExampleOutput =
+	| WidgetExampleOutputLabels
+	| WidgetExampleOutputAnswerScore
+	| WidgetExampleOutputText
+	| WidgetExampleOutputUrl;
+//#endregion
+
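+//#region inputs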
+export interface WidgetExampleBase<TOutput> {
+	example_title?: string;
+	group?: string;
+	/**
+	 * Potential overrides to API parameters for this specific example
+	 * (takes precedence over the model card metadata's inference.parameters)
+	 */
+	parameters?: {
+		/// token-classification
+		aggregation_strategy?: string;
+		/// text-generation
+		top_k?: number;
+		top_p?: number;
+		temperature?: number;
+		max_new_tokens?: number;
+		do_sample?: boolean;
+		/// text-to-image
+		negative_prompt?: string;
+		guidance_scale?: number;
+		num_inference_steps?: number;
+	};
+	/**
+	 * Optional output
+	 */
+	output?: TOutput;
+}
+
+export interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	text: string;
+}
+
+export interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleTextInput<TOutput> {
+	context: string;
+}
+
+export interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+	table: TableData;
+}
+
+export interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	src: string;
+}
+export interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleAssetInput<TOutput> {
+	prompt: string;
+}
+
+export type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
+	WidgetExampleTextInput<TOutput>;
+
+export type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
+	WidgetExampleZeroShotTextInput<TOutput>;
+
+export interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	structured_data: TableData;
+}
+
+export interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	table: TableData;
+}
+
+export interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+	text: string;
+	candidate_labels: string;
+	multi_class: boolean;
+}
+
+export interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleBase<TOutput> {
+	source_sentence: string;
+	sentences: string[];
+}
+
+//#endregion
+
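+/**
+ * A single widget example, as typically declared in the model card metadata, e.g. (YAML):
+ *
+ *   widget:
+ *     - text: "I love this movie!"
+ *       example_title: "Positive review"
+ */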
+export type WidgetExample<TOutput = WidgetExampleOutput> =
+	| WidgetExampleTextInput<TOutput>
+	| WidgetExampleTextAndContextInput<TOutput>
+	| WidgetExampleTextAndTableInput<TOutput>
+	| WidgetExampleAssetInput<TOutput>
+	| WidgetExampleAssetAndPromptInput<TOutput>
+	| WidgetExampleAssetAndTextInput<TOutput>
+	| WidgetExampleAssetAndZeroShotInput<TOutput>
+	| WidgetExampleStructuredDataInput<TOutput>
+	| WidgetExampleTableDataInput<TOutput>
+	| WidgetExampleZeroShotTextInput<TOutput>
+	| WidgetExampleSentenceSimilarityInput<TOutput>;
+
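+// Distributes over the union, yielding the keys of every member (not only the shared ones).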
+type KeysOfUnion<T> = T extends unknown ? keyof T : never;
+
+export type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..9e4c6a54f6fe067142d497c92a87de2d94f5c088
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte
@@ -0,0 +1,57 @@
+<script lang="ts">
+	import IconSpin from "$lib/components/Icons/IconSpin.svelte";
+	import IconFile from "$lib/components/Icons/IconFile.svelte";
+
+	export let accept: string | undefined;
+	export let classNames = "";
+	export let isLoading = false;
+	export let isDisabled = false;
+	export let label = "Browse for file";
+	export let onSelectFile: (file: File | Blob) => void;
+
+	let fileInput: HTMLInputElement;
+	let isDragging = false;
+
+	function onChange() {
+		const file = fileInput.files?.[0];
+		if (file) {
+			onSelectFile(file);
+		}
+	}
+</script>
+
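+<!-- On drop, the DataTransfer's FileList is assigned directly to the hidden input so the same change handler runs. -->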
+{#if !isDisabled}
+	<div
+		class={classNames}
+		on:dragenter={() => {
+			isDragging = true;
+		}}
+		on:dragover|preventDefault
+		on:dragleave={() => {
+			isDragging = false;
+		}}
+		on:drop|preventDefault={(e) => {
+			isDragging = false;
+			fileInput.files = e.dataTransfer?.files ?? null;
+			onChange();
+		}}
+	>
+		<label class="btn-widget {isDragging ? 'ring' : ''} {isLoading ? 'text-gray-600' : ''}">
+			{#if isLoading}
+				<IconSpin classNames="-ml-1 mr-1.5 text-gray-600 animate-spin" />
+			{:else}
+				<IconFile classNames="-ml-1 mr-1.5" />
+			{/if}
+			<input
+				{accept}
+				bind:this={fileInput}
+				on:change={onChange}
+				disabled={isLoading || isDisabled}
+				style="display: none;"
+				type="file"
+			/>
+			{label}
+		</label>
+	</div>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..1ff6749498e0dea03461af2ba5285bdf4eeacc3b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte
@@ -0,0 +1,33 @@
+<script lang="ts">
+	import IconCode from "$lib/components/Icons/IconCode.svelte";
+	import IconMaximize from "$lib/components/Icons/IconMaximize.svelte";
+
+	export let onClickMaximizeBtn: () => void;
+	export let outputJson: string;
+	export let isDisabled = false;
+
+	let isOutputJsonVisible = false;
+</script>
+
+<div class="mt-auto flex items-center pt-4 text-xs text-gray-500">
+	{#if !isDisabled}
+		<button
+			class="flex items-center {outputJson ? '' : 'cursor-not-allowed text-gray-300'}"
+			disabled={!outputJson}
+			on:click={() => {
+				isOutputJsonVisible = !isOutputJsonVisible;
+			}}
+		>
+			<IconCode classNames="mr-1" />
+			JSON Output
+		</button>
+	{/if}
+	<button class="ml-auto flex items-center" on:click|preventDefault={onClickMaximizeBtn}>
+		<IconMaximize classNames="mr-1" />
+		Maximize
+	</button>
+</div>
+{#if outputJson && isOutputJsonVisible}
+	<pre
+		class="mt-3 max-h-screen overflow-auto rounded bg-gray-100 p-2 text-xs text-gray-600 dark:bg-gray-800">{outputJson}</pre>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..a899908a40cbee3ca25bd0da8a576edfcff99a2d
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte
@@ -0,0 +1,49 @@
+<script lang="ts">
+	import { TASKS_DATA, type PipelineType } from "@huggingface/tasks";
+	import { getPipelineTask } from "../../../../utils/ViewUtils.js";
+	import IconInfo from "$lib/components/Icons/IconInfo.svelte";
+	import IconLightning from "$lib/components/Icons/IconLightning.svelte";
+	import PipelineTag from "../../../PipelineTag/PipelineTag.svelte";
+
+	export let noTitle = false;
+	export let title: string | null = null;
+	export let pipeline: PipelineType | undefined;
+	export let isDisabled = false;
+
+	$: task = pipeline ? getPipelineTask(pipeline) : undefined;
+</script>
+
+<div class="mb-2 flex items-center font-semibold">
+	{#if !noTitle}
+		{#if title}
+			<div class="flex items-center text-lg">
+				{title}
+			</div>
+		{:else}
+			<div class="flex items-center text-lg">
+				{#if !isDisabled}
+					<IconLightning classNames="-ml-1 mr-1 text-yellow-500" />
+					Inference API
+				{:else}
+					Inference Examples
+				{/if}
+			</div>
+			<a target="_blank" href="https://huggingface.co/docs/hub/models-widgets#example-outputs">
+				<IconInfo classNames="ml-1.5 text-sm text-gray-400 hover:text-black" />
+			</a>
+		{/if}
+	{/if}
+</div>
+<div class="mb-0.5 flex w-full max-w-full flex-wrap items-center justify-between text-sm text-gray-500">
+	{#if pipeline && task}
+		<a
+			class={TASKS_DATA[task] ? "hover:underline" : undefined}
+			href={TASKS_DATA[task] ? `/tasks/${task}` : undefined}
+			target="_blank"
+			title={TASKS_DATA[task] ? `Learn more about ${task}` : undefined}
+		>
+			<PipelineTag classNames="mr-2 mb-1.5" {pipeline} />
+		</a>
+	{/if}
+	<slot />
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..f374dabef274a4c4e8d31852ddb67296f1eebb8f
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte
@@ -0,0 +1,118 @@
+<script lang="ts">
+	import { type WidgetProps, type ModelLoadInfo, LoadState, ComputeType } from "../types.js";
+	import IconAzureML from "$lib/components/Icons/IconAzureML.svelte";
+	import { InferenceDisplayability } from "../../../../interfaces/InferenceDisplayability.js";
+	import IconInfo from "$lib/components/Icons/IconInfo.svelte";
+
+	export let model: WidgetProps["model"];
+	export let computeTime: string;
+	export let error: string;
+	export let modelLoadInfo: ModelLoadInfo | undefined = undefined;
+	export let modelTooBig = false;
+
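+	// Status copy per LoadState; AzureML-hosted models (currently bigscience/bloom) get their own variant below.
+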
+	const state = {
+		[LoadState.Loadable]: "This model can be loaded on the Inference API on-demand.",
+		[LoadState.Loaded]: "This model is currently loaded and running on the Inference API.",
+		[LoadState.TooBig]:
+			"Model is too large to load onto the free Inference API. To try the model, launch it on Inference Endpoints instead.",
+		[LoadState.Error]: "⚠️ This model could not be loaded by the inference API. ⚠️",
+	} as const;
+
+	const azureState = {
+		[LoadState.Loadable]: "This model can be loaded loaded on AzureML Managed Endpoint",
+		[LoadState.Loaded]: "This model is loaded and running on AzureML Managed Endpoint",
+		[LoadState.TooBig]:
+			"Model is too large to load onto the free Inference API. To try the model, launch it on Inference Endpoints instead.",
+		[LoadState.Error]: "⚠️ This model could not be loaded.",
+	} as const;
+
+	function getStatusReport(
+		modelLoadInfo: ModelLoadInfo | undefined,
+		statuses: Record<LoadState, string>,
+		isAzure = false
+	): string {
+		if (!modelLoadInfo) {
+			return "Model state unknown";
+		}
+		if (modelLoadInfo.compute_type === ComputeType.CPU && modelLoadInfo.state === LoadState.Loaded && !isAzure) {
+			return `The model is loaded and running on <a class="hover:underline" href="https://huggingface.co/intel" target="_blank">Intel Xeon 3rd Gen Scalable CPU</a>`;
+		}
+		return statuses[modelLoadInfo.state];
+	}
+
+	function getComputeTypeMsg(): string {
+		const computeType = modelLoadInfo?.compute_type ?? ComputeType.CPU;
+		if (computeType === ComputeType.CPU) {
+			return "Intel Xeon 3rd Gen Scalable cpu";
+		}
+		return computeType;
+	}
+</script>
+
+<div class="mt-2">
+	<div class="text-xs text-gray-400">
+		{#if model.id === "bigscience/bloom"}
+			<div class="flex items-baseline">
+				<div class="flex items-center whitespace-nowrap text-gray-700">
+					<IconAzureML classNames="mr-1 flex-none" /> Powered by&nbsp;
+					<a
+						class="underline hover:text-gray-800"
+						href="https://azure.microsoft.com/products/machine-learning"
+						target="_blank">AzureML</a
+					>
+				</div>
+				<div class="border-dotter mx-2 flex flex-1 -translate-y-px border-b border-gray-100" />
+				<div>
+					{@html getStatusReport(modelLoadInfo, azureState, true)}
+				</div>
+			</div>
+		{:else if computeTime}
+			Computation time on {getComputeTypeMsg()}: {computeTime}
+		{:else if (model.inference === InferenceDisplayability.Yes || model.pipeline_tag === "reinforcement-learning") && !modelTooBig}
+			{@html getStatusReport(modelLoadInfo, state)}
+		{:else if model.inference === InferenceDisplayability.ExplicitOptOut}
+			<span class="text-sm text-gray-500">Inference API has been turned off for this model.</span>
+		{:else if model.inference === InferenceDisplayability.CustomCode}
+			<span class="text-sm text-gray-500">Inference API does not yet support model repos that contain custom code.</span
+			>
+		{:else if model.inference === InferenceDisplayability.LibraryNotDetected}
+			<span class="text-sm text-gray-500">
+				Unable to determine this model's library. Check the
+				<a class="color-inherit" href="/docs/hub/model-cards#specifying-a-library">
+					docs <IconInfo classNames="inline" />
+				</a>.
+			</span>
+		{:else if model.inference === InferenceDisplayability.PipelineNotDetected}
+			<span class="text-sm text-gray-500">
+				Unable to determine this model's pipeline type. Check the
+				<a class="color-inherit" href="/docs/hub/models-widgets#enabling-a-widget">
+					docs <IconInfo classNames="inline" />
+				</a>.
+			</span>
+		{:else if model.inference === InferenceDisplayability.PipelineLibraryPairNotSupported}
+			<span class="text-sm text-gray-500">
+				Inference API does not yet support {model.library_name} models for this pipeline type.
+			</span>
+		{:else if modelTooBig}
+			<span class="text-sm text-gray-500">
+				Model is too large to load onto the free Inference API. To try the model, launch it on <a
+					class="underline"
+					href="https://ui.endpoints.huggingface.co/new?repository={encodeURIComponent(model.id)}"
+					>Inference Endpoints</a
+				>
+				instead.
+			</span>
+		{:else}
+			<!-- added as a failsafe but this case cannot currently happen -->
+			<span class="text-sm text-gray-500">
+				Inference API is disabled for an unknown reason. Please open a
+				<a class="color-inherit underline" href="/{model.id}/discussions/new">Discussion in the Community tab</a>.
+			</span>
+		{/if}
+	</div>
+	{#if error}
+		<div class="alert alert-error mt-3">{error}</div>
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..be37c4b60b33c25c07aa35b847779722383eaffa
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte
@@ -0,0 +1,99 @@
+<script lang="ts">
+	import type { ExampleRunOpts } from "../types.js";
+	import type { WidgetExample } from "../WidgetExample.js";
+
+	type TWidgetExample = $$Generic<WidgetExample>;
+
+	import { slide } from "svelte/transition";
+
+	import IconCaretDownV2 from "$lib/components/Icons/IconCaretDownV2.svelte";
+
+	export let classNames = "";
+	export let isLoading = false;
+	export let inputSamples: TWidgetExample[];
+	export let applyInputSample: (sample: TWidgetExample, opts?: ExampleRunOpts) => void;
+
+	let containerEl: HTMLElement;
+	let isOptionsVisible = false;
+	let title = "Examples";
+
+	$: {
+		// reset title on inputSamples change (i.e. input group change)
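+		// (the bare `inputSamples` reference below registers it as a dependency of this reactive block)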
+		inputSamples;
+		title = "Examples";
+	}
+
+	function _applyInputSample(idx: number) {
+		hideOptions();
+		const sample = inputSamples[idx];
+		title = sample.example_title as string;
+		applyInputSample(sample);
+	}
+
+	function _previewInputSample(idx: number) {
+		const sample = inputSamples[idx];
+		applyInputSample(sample, { isPreview: true });
+	}
+
+	function toggleOptionsVisibility() {
+		isOptionsVisible = !isOptionsVisible;
+	}
+
+	function onClick(e: MouseEvent | TouchEvent) {
+		let targetElement = e.target;
+		do {
+			if (targetElement === containerEl) {
+				// This is a click inside. Do nothing, just return.
+				return;
+			}
+			targetElement = (targetElement as HTMLElement).parentElement;
+		} while (targetElement);
+		// This is a click outside
+		hideOptions();
+	}
+
+	function hideOptions() {
+		isOptionsVisible = false;
+	}
+</script>
+
+<svelte:window on:click={onClick} />
+
+<div
+	class="relative mb-1.5 {classNames}
+		{isLoading && 'pointer-events-none opacity-50'} 
+		{isOptionsVisible && 'z-10'}"
+	bind:this={containerEl}
+>
+	<!-- svelte-ignore a11y-click-events-have-key-events -->
+	<div
+		class="inline-flex w-32 justify-between rounded-md border border-gray-100 px-4 py-1"
+		on:click={toggleOptionsVisibility}
+	>
+		<div class="truncate text-sm">{title}</div>
+		<IconCaretDownV2
+			classNames="-mr-1 ml-2 h-5 w-5 transition ease-in-out transform {isOptionsVisible && '-rotate-180'}"
+		/>
+	</div>
+
+	{#if isOptionsVisible}
+		<div
+			class="absolute right-0 mt-1 w-full origin-top-right rounded-md ring-1 ring-black ring-opacity-10"
+			transition:slide
+		>
+			<div class="rounded-md bg-white py-1" role="none">
+				{#each inputSamples as { example_title }, i}
+					<!-- svelte-ignore a11y-click-events-have-key-events a11y-mouse-events-have-key-events -->
+					<div
+						class="cursor-pointer truncate px-4 py-2 text-sm hover:bg-gray-100 hover:text-gray-900 dark:hover:bg-gray-800 dark:hover:text-gray-200"
+						on:mouseover={() => _previewInputSample(i)}
+						on:click={() => _applyInputSample(i)}
+					>
+						{example_title}
+					</div>
+				{/each}
+			</div>
+		</div>
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..97283b98841fedde314399a791b6234e9dd13e23
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte
@@ -0,0 +1,81 @@
+<script lang="ts">
+	import { slide } from "svelte/transition";
+
+	import IconCaretDownV2 from "$lib/components/Icons/IconCaretDownV2.svelte";
+
+	export let classNames = "";
+	export let isLoading = false;
+	export let inputGroups: string[];
+	export let selectedInputGroup: string;
+
+	let containerEl: HTMLElement;
+	let isOptionsVisible = false;
+	let title = "Groups";
+
+	function chooseInputGroup(idx: number) {
+		hideOptions();
+		const inputGroup = inputGroups[idx];
+		title = inputGroup;
+		selectedInputGroup = inputGroup;
+	}
+
+	function toggleOptionsVisibility() {
+		isOptionsVisible = !isOptionsVisible;
+	}
+
+	function onClick(e: MouseEvent | TouchEvent) {
+		let targetElement = e.target;
+		do {
+			if (targetElement === containerEl) {
+				// This is a click inside. Do nothing, just return.
+				return;
+			}
+			targetElement = (targetElement as HTMLElement).parentElement;
+		} while (targetElement);
+		// This is a click outside
+		hideOptions();
+	}
+
+	function hideOptions() {
+		isOptionsVisible = false;
+	}
+</script>
+
+<svelte:window on:click={onClick} />
+
+<div
+	class="relative mb-1.5 {classNames}
+		{isLoading && 'pointer-events-none opacity-50'} 
+		{isOptionsVisible && 'z-10'}"
+	bind:this={containerEl}
+>
+	<!-- svelte-ignore a11y-click-events-have-key-events -->
+	<div
+		class="inline-flex w-32 justify-between rounded-md border border-gray-100 px-4 py-1"
+		on:click={toggleOptionsVisibility}
+	>
+		<div class="truncate text-sm">{title}</div>
+		<IconCaretDownV2
+			classNames="-mr-1 ml-2 h-5 w-5 transition ease-in-out transform {isOptionsVisible && '-rotate-180'}"
+		/>
+	</div>
+
+	{#if isOptionsVisible}
+		<div
+			class="absolute right-0 mt-1 w-full origin-top-right rounded-md ring-1 ring-black ring-opacity-10"
+			transition:slide
+		>
+			<div class="rounded-md bg-white py-1" role="none">
+				{#each inputGroups as inputGroup, i}
+					<!-- svelte-ignore a11y-click-events-have-key-events -->
+					<div
+						class="truncate px-4 py-2 text-sm hover:bg-gray-100 hover:text-gray-900 dark:hover:bg-gray-800 dark:hover:text-gray-200"
+						on:click={() => chooseInputGroup(i)}
+					>
+						{inputGroup}
+					</div>
+				{/each}
+			</div>
+		</div>
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..03f1e5c9e62a401a095a479340169e587cc6e557
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte
@@ -0,0 +1,13 @@
+<script lang="ts">
+	export let classNames = "";
+	export let forInput: string | undefined = undefined;
+	export let label: string | undefined;
+</script>
+
+<label class="block {classNames}" for={forInput}>
+	<slot name="before" />
+	{#if label}
+		<span class="text-sm text-gray-500">{label}</span>
+	{/if}
+	<slot name="after" />
+</label>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..5c7e5f7618f062d6aa98bf3e272ea216e163f6fb
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte
@@ -0,0 +1,39 @@
+<script lang="ts">
+	import { onDestroy, onMount } from "svelte";
+
+	import IconSpin from "$lib/components/Icons/IconSpin.svelte";
+
+	export let estimatedTime: number;
+
+	let interval: ReturnType<typeof setInterval> | undefined;
+	let progressRatio = 0;
+	let timeElapsed = 0;
+
+	onMount(() => {
+		interval = setInterval(() => {
+			timeElapsed += 1;
+			const ratio = timeElapsed / estimatedTime;
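+			// Cap the bar at 96% so it never looks finished before the model has actually loaded.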
+			progressRatio = ratio < 0.96 ? ratio : 0.96;
+		}, 500);
+	});
+
+	onDestroy(() => {
+		if (interval) {
+			clearInterval(interval);
+		}
+	});
+</script>
+
+<div class="mt-3 flex h-10">
+	<div
+		class="relative z-0 flex flex-1 items-center justify-center rounded-lg bg-gray-50 text-gray-600 shadow-inner dark:bg-gray-950"
+	>
+		<div
+			class="absolute inset-y-0 left-0 rounded-lg bg-gradient-to-r from-purple-200 to-purple-100 transition-all dark:from-purple-800 dark:to-purple-900"
+			style="width: {progressRatio * 100}%;"
+		/>
+		<IconSpin classNames="text-purple-400 dark:text-purple-200 animate-spin mr-2 z-10" />
+		<span class="z-10">Model is loading</span>
+	</div>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..31ab4efa56cac7e7fd431fc59325f01eb1be0691
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte
@@ -0,0 +1,12 @@
+<script lang="ts">
+	export let position: "left" | "right";
+	export let text: string;
+</script>
+
+<div
+	class="rounded-2xl px-3 py-2 {position === 'right'
+		? 'ml-7 bg-blue-600 text-white'
+		: 'mr-7 place-self-start bg-gray-50 dark:bg-gray-850 dark:text-gray-200'}"
+>
+	{text}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..a1fe65892620fa198f86a84da759a4b55c318dc5
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte
@@ -0,0 +1,66 @@
+<!-- 
+for Tailwind:
+from-blue-400 to-blue-200 dark:from-blue-400 dark:to-blue-600
+from-cyan-400 to-cyan-200 dark:from-cyan-400 dark:to-cyan-600
+from-green-400 to-green-200 dark:from-green-400 dark:to-green-600
+from-indigo-400 to-indigo-200 dark:from-indigo-400 dark:to-indigo-600
+from-lime-400 to-lime-200 dark:from-lime-400 dark:to-lime-600
+from-orange-400 to-orange-200 dark:from-orange-400 dark:to-orange-600
+from-purple-400 to-purple-200 dark:from-purple-400 dark:to-purple-600
+from-red-400 to-red-200 dark:from-red-400 dark:to-red-600
+from-yellow-400 to-yellow-200 dark:from-yellow-400 dark:to-yellow-600
+ -->
+<script lang="ts">
+	export let classNames = "";
+	export let defaultBarColor = "purple";
+	type LabelField = "label" | "answer";
+	export let labelField: LabelField = "label";
+	export let output: Array<
+		{ label: string; score: number; color?: string } | { answer: string; score: number; color?: string }
+	> = [];
+	export let highlightIndex = -1;
+	export let mouseover: (index: number) => void = () => {};
+	export let mouseout: () => void = () => {};
+
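+	// Bars are scaled relative to the highest score, topping out at 80% of the row width.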
+	$: scoreMax = Math.max(0, ...output.map((x) => x.score));
+
+	function text(outputItem: (typeof output)[0]) {
+		if (labelField in outputItem) {
+			return outputItem[labelField as keyof typeof outputItem];
+		} else {
+			return "";
+		}
+	}
+</script>
+
+{#if output.length}
+	<div class="space-y-3.5 {classNames}">
+		<!-- NB: We sadly can't do color = defaultBarColor here, as the Svelte compiler would throw an unused-export-let warning (a bug on their side)... -->
+		{#each output as { score, color }, index}
+			<!-- svelte-ignore a11y-mouse-events-have-key-events -->
+			<div
+				class="animate__animated animate__fadeIn flex items-start justify-between font-mono
+					text-xs leading-none transition duration-200 ease-in-out
+					{highlightIndex !== -1 && highlightIndex !== index && 'opacity-30 grayscale filter'}
+				"
+				style="animation-delay: {0.04 * index}s"
+				on:mouseover={() => mouseover(index)}
+				on:mouseout={mouseout}
+			>
+				<div class="flex-1">
+					<div
+						class="mb-1 h-1 rounded bg-gradient-to-r
+							from-{color ?? defaultBarColor}-400
+							to-{color ?? defaultBarColor}-200
+							dark:from-{color ?? defaultBarColor}-400
+							dark:to-{color ?? defaultBarColor}-600"
+						style={`width: ${Math.ceil((score / scoreMax) * 100 * 0.8)}%;`}
+					/>
+					<span class="leading-snug">{text(output[index])}</span>
+				</div>
+				<span class="pl-2">{score.toFixed(3)}</span>
+			</div>
+		{/each}
+	</div>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..5a14cf9db92ad55501034c9b7c4408c6057693a1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte
@@ -0,0 +1,33 @@
+<script lang="ts">
+	import { afterUpdate } from "svelte";
+
+	import { isFullyScrolled, scrollToMax } from "../../../../utils/ViewUtils.js";
+	import WidgetOutputConvoBubble from "../WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte";
+
+	export let modelId: string;
+	export let output: Array<{
+		input: string;
+		response: string;
+	}>;
+
+	let wrapperEl: HTMLElement;
+
+	afterUpdate(() => {
+		if (wrapperEl && !isFullyScrolled(wrapperEl)) {
+			scrollToMax(wrapperEl);
+		}
+	});
+</script>
+
+<div bind:this={wrapperEl} class="h-64 overflow-y-auto rounded-t-lg border border-b-0 leading-tight">
+	<div class="p-3 pt-6 text-center text-sm text-gray-400">
+		Input a message to start chatting with
+		<strong>{modelId}</strong>.
+	</div>
+	<div class="flex flex-col items-end space-y-4 p-3">
+		{#each output as exchange}
+			<WidgetOutputConvoBubble position="right" text={exchange.input} />
+			<WidgetOutputConvoBubble position="left" text={exchange.response} />
+		{/each}
+	</div>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..410fc55c33e8c5e228bb3f0b8f126b175c7b621c
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte
@@ -0,0 +1,53 @@
+<script lang="ts">
+	import { fly } from "svelte/transition";
+
+	interface Output {
+		aggregator?: string;
+		answer: string;
+		coordinates: [number, number][];
+		cells: number[];
+	}
+
+	export let output: Output;
+	export let isAnswerOnlyOutput: boolean;
+</script>
+
+<div
+	class="col-span-12 flex h-10 items-center overflow-x-auto rounded-t-lg border border-b-0 bg-gradient-to-r to-white px-3 dark:to-gray-950 {!!output
+		?.cells?.length || isAnswerOnlyOutput
+		? 'via-green border-green-50 from-green-50 dark:border-green-800 dark:from-green-800'
+		: 'via-red border-red-50 from-red-50 dark:border-red-800 dark:from-red-800'}"
+	in:fly
+>
+	{#if isAnswerOnlyOutput}
+		<span
+			class="ml-2 whitespace-nowrap rounded border border-green-200 bg-green-100 px-1 leading-tight text-green-800 dark:border-green-700 dark:bg-green-800 dark:text-green-100"
+			>{output.answer}</span
+		>
+	{:else}
+		<span class="whitespace-nowrap">
+			{#if output.cells.length}
+				{output.cells.length}
+				match{output.cells.length > 1 ? "es" : ""}
+				:
+			{:else}
+				No matches
+			{/if}
+		</span>
+		{#if output.cells.length}
+			{#each output.cells as answer}
+				<span
+					class="ml-2 whitespace-nowrap rounded border border-green-200 bg-green-100 px-1 leading-tight text-green-800 dark:border-green-700 dark:bg-green-800 dark:text-green-100"
+					>{answer}</span
+				>
+			{/each}
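+			<!-- Table-QA models (e.g. TAPAS) may also return an aggregation operator such as SUM or AVERAGE. -->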
+			{#if output.aggregator !== "NONE"}
+				<span
+					class="ml-auto whitespace-nowrap rounded border border-blue-200 bg-blue-100 px-1 leading-tight text-blue-800 dark:border-blue-700 dark:bg-blue-800 dark:text-blue-100"
+					>{output.aggregator}</span
+				>
+			{/if}
+		{/if}
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..e1d051506286f1216c3f712b2986dbb6e97463cb
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte
@@ -0,0 +1,10 @@
+<script lang="ts">
+	export let classNames: string;
+	export let output: string;
+</script>
+
+{#if output.length}
+	<p class="alert alert-success {classNames}">
+		{output}
+	</p>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..58343a2cc4bb9d7a1281e2d7bf687571ceff3fcd
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte
@@ -0,0 +1,97 @@
+<script lang="ts">
+	import { escape, mod, sum } from "../../../../utils/ViewUtils.js";
+
+	interface Span {
+		end: number;
+		index?: string;
+		start: number;
+		type: string;
+	}
+
+	interface SpanTag {
+		span: Span;
+		tag: "start" | "end";
+	}
+
+	export let classNames = "";
+	export let output: Span[] = [];
+	export let text = "";
+
+	const COLORS = ["teal", "indigo", "orange", "sky", "violet", "purple", "fuchsia", "pink"] as const;
+
+	/**
+	 * Render a text string and its entity spans
+	 *
+	 * *see displacy-ent.js*
+	 * see https://github.com/explosion/displacy-ent/issues/2
+	 */
+	function render(text: string, spans: Span[]): string {
+		const tags: { [index: number]: SpanTag[] } = {};
+
+		const __addTag = (i: number, s: Span, tag: "start" | "end") => {
+			if (Array.isArray(tags[i])) {
+				tags[i].push({ span: s, tag: tag });
+			} else {
+				tags[i] = [{ span: s, tag: tag }];
+			}
+		};
+
+		for (const s of spans) {
+			__addTag(s.start, s, "start");
+			__addTag(s.end, s, "end");
+		}
+
+		let out = "";
+		let offset = 0;
+
+		const indexes = Object.keys(tags)
+			.map((k) => parseInt(k, 10))
+			.sort((a, b) => a - b); /// CAUTION: numeric sort; the default comparator would sort indexes as strings
+		for (const i of indexes) {
+			const spanTags = tags[i];
+			if (i > offset) {
+				out += escape(text.slice(offset, i));
+			}
+
+			offset = i;
+
+			for (const spanTag of spanTags) {
+				const hash = mod(sum(Array.from(spanTag.span.type).map((x) => x.charCodeAt(0))), COLORS.length);
+				const color = COLORS[hash];
+				if (spanTag.tag === "start") {
+					out += `<span
+							data-entity="${spanTag.span.type}" data-hash="${hash}" data-index="${spanTag.span.index ?? ""}"
+							class="bg-${color}-100 text-${color}-800 rounded px-1 py-0.5 dark:text-${color}-100 dark:bg-${color}-700"
+						>`;
+				} else {
+					out += `<span
+							class="text-xs select-none bg-${color}-500 text-${color}-100 rounded font-semibold px-0.5 ml-1"
+						>${spanTag.span.type}</span></span>`;
+				}
+			}
+		}
+
+		out += escape(text.slice(offset, text.length));
+		return out;
+	}
+</script>
+
+{#if text && output.length}
+	<!-- 
+		For Tailwind:
+		bg-teal-100 text-teal-800 bg-teal-500 text-teal-100 dark:text-teal-100 dark:bg-teal-700
+		bg-indigo-100 text-indigo-800 bg-indigo-500 text-indigo-100 dark:text-indigo-100 dark:bg-indigo-700
+		bg-orange-100 text-orange-800 bg-orange-500 text-orange-100 dark:text-orange-100 dark:bg-orange-700
+		bg-sky-100 text-sky-800 bg-sky-500 text-sky-100 dark:text-sky-100 dark:bg-sky-700
+		bg-violet-100 text-violet-800 bg-violet-500 text-violet-100 dark:text-violet-100 dark:bg-violet-700
+		bg-purple-100 text-purple-800 bg-purple-500 text-purple-100 dark:text-purple-100 dark:bg-purple-700
+		bg-fuchsia-100 text-fuchsia-800 bg-fuchsia-500 text-fuchsia-100 dark:text-fuchsia-100 dark:bg-fuchsia-700
+		bg-pink-100 text-pink-800 bg-pink-500 text-pink-100 dark:text-pink-100 dark:bg-pink-700
+	-->
+	<div class="leading-8 text-gray-800 {classNames}">
+		{@html render(text, output)}
+	</div>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3a60295b8ef5787de9aaf66c94d0d0d146ec2a14
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte
@@ -0,0 +1,29 @@
+<script lang="ts">
+	import WidgetSubmitBtn from "../WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+
+	export let flatTop = false;
+	export let isLoading: boolean;
+	export let isDisabled = false;
+	export let onClickSubmitBtn: (e?: MouseEvent) => void;
+	export let placeholder = "Your sentence here...";
+	export let submitButtonLabel: string | undefined = undefined;
+	export let value: string = "";
+</script>
+
+<div class="flex h-10">
+	<input
+		bind:value
+		class="form-input-alt min-w-0 flex-1 rounded-r-none {flatTop ? 'rounded-t-none' : ''}"
+		placeholder={isDisabled ? "" : placeholder}
+		required={true}
+		type="text"
+		disabled={isLoading || isDisabled}
+	/>
+	<WidgetSubmitBtn
+		classNames="rounded-l-none border-l-0 {flatTop ? 'rounded-t-none' : ''}"
+		{isLoading}
+		{isDisabled}
+		label={submitButtonLabel}
+		onClick={onClickSubmitBtn}
+	/>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7cfb06a492e722d08045b5d41c9a186e6f6f05fb
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte
@@ -0,0 +1,21 @@
+<script lang="ts">
+	import WidgetLabel from "../WidgetLabel/WidgetLabel.svelte";
+
+	export let classNames = "";
+	export let group: string;
+	export let label: string;
+	export let onChange: (e: Event) => void = () => null;
+	export let value: string;
+</script>
+
+<WidgetLabel classNames="inline-flex items-center {classNames}" {label}>
+	<svelte:fragment slot="before">
+		<input
+			bind:group
+			class="mr-2 cursor-pointer border-gray-300 checked:text-blue-500 hover:border-gray-300 focus:border-gray-300 focus:ring-1 focus:ring-blue-200 focus:ring-offset-2 dark:border-gray-700 dark:focus:ring-gray-500 dark:focus:ring-offset-gray-925"
+			on:change={onChange}
+			type="radio"
+			{value}
+		/>
+	</svelte:fragment>
+</WidgetLabel>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts
new file mode 100644
index 0000000000000000000000000000000000000000..be0e295d56a90514f9cd379dcf716126ab0caa0b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts
@@ -0,0 +1,109 @@
+export default class Recorder {
+	// see developers.google.com/web/updates/2016/01/mediarecorder
+	type: "audio" | "video" = "audio";
+	private apiToken: string | undefined;
+	private audioContext: AudioContext | undefined;
+	private isLoggedIn = false;
+	private isModelLoaded = false;
+	private isEmptyBuffer = false;
+	private modelId: string;
+	private onError: (err: string) => void;
+	private updateModelLoading: (isLoading: boolean, estimatedTime?: number) => void;
+	private renderText: (txt: string) => void;
+	private renderWarning: (warning: string) => void;
+	private socket: WebSocket | undefined;
+	private stream: MediaStream | undefined;
+
+	constructor(
+		modelId: string,
+		apiToken: string | undefined,
+		renderText: (txt: string) => void,
+		renderWarning: (warning: string) => void,
+		onError: (err: string) => void,
+		updateModelLoading: (isLoading: boolean, estimatedTime?: number) => void
+	) {
+		this.modelId = modelId;
+		this.apiToken = apiToken || "";
+		this.renderText = renderText;
+		this.renderWarning = renderWarning;
+		this.onError = onError;
+		this.updateModelLoading = updateModelLoading;
+	}
+
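+	/**
+	 * Rough protocol: open the ASR websocket, authenticate by sending "Bearer <token>"
+	 * as the first message, then stream base64-encoded audio chunks captured by an
+	 * AudioWorklet; the server replies with "status" messages (login, estimated load
+	 * time) followed by transcription results.
+	 */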
+	async start(): Promise<void> {
+		const constraints: MediaStreamConstraints = this.type === "video" ? { audio: true, video: true } : { audio: true };
+		this.stream = await navigator.mediaDevices.getUserMedia(constraints);
+
+		this.socket = new WebSocket(`wss://api-inference.huggingface.co/asr/live/cpu/${this.modelId}`);
+
+		this.socket.onerror = () => {
+			this.onError("Webscoket connection error");
+		};
+
+		this.socket.onopen = () => {
+			this.socket?.send(`Bearer ${this.apiToken}`);
+		};
+
+		this.updateModelLoading(true);
+
+		this.socket.onmessage = (e: MessageEvent) => {
+			const data = JSON.parse(e.data);
+			if (data.type === "status" && data.message === "Successful login") {
+				this.isLoggedIn = true;
+			} else if (data.type === "status" && !!data.estimated_time && !this.isModelLoaded) {
+				this.updateModelLoading(true, data.estimated_time);
+			} else {
+				// data.type === "results"
+				this.isModelLoaded = true;
+				if (data.text) {
+					this.renderText(data.text);
+				} else if (!this.isEmptyBuffer) {
+					this.renderWarning("result was empty");
+				}
+			}
+		};
+
+		this.audioContext = new AudioContext();
+		await this.audioContext.audioWorklet.addModule("/audioProcessor.js");
+		const microphone = this.audioContext.createMediaStreamSource(this.stream);
+		const dataExtractor = new AudioWorkletNode(this.audioContext, "AudioDataExtractor");
+		microphone.connect(dataExtractor).connect(this.audioContext.destination);
+
+		dataExtractor.port.onmessage = (event) => {
+			const { buffer, sampling_rate: samplingRate } = event.data;
+			this.isEmptyBuffer = buffer.reduce((sum: number, x: number) => sum + x) === 0;
+			if (this.isModelLoaded && this.isEmptyBuffer) {
+				this.renderWarning("🎤 input is empty: try speaking louder 🗣️ & make sure correct mic source is selected");
+			}
+			const base64: string = btoa(String.fromCharCode(...new Uint8Array(buffer.buffer)));
+			const message = {
+				raw: base64,
+				sampling_rate: samplingRate,
+			};
+			if (this.isLoggedIn) {
+				try {
+					this.socket?.send(JSON.stringify(message));
+				} catch (e) {
+					this.onError(`Error sending data to websocket: ${e}`);
+				}
+			}
+		};
+	}
+
+	stop(): void {
+		this.isLoggedIn = false;
+		void this.audioContext?.close();
+		this.socket?.close();
+		if (this.stream) {
+			for (const t of this.stream.getTracks()) {
+				t.stop();
+			}
+		}
+	}
+}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6056084c2381b0f041180748bcba3e5410d9f4d1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte
@@ -0,0 +1,98 @@
+<script lang="ts">
+	import type { WidgetProps } from "$lib/components/InferenceWidget/shared/types.js";
+	import { onDestroy, onMount } from "svelte";
+	import IconMagicWand from "$lib/components/Icons/IconMagicWand.svelte";
+	import Recorder from "./Recorder.js";
+
+	export let apiToken: WidgetProps["apiUrl"] | undefined = undefined;
+	export let classNames = "";
+	export let model: WidgetProps["model"];
+	export let updateModelLoading: (isLoading: boolean, estimatedTime?: number) => void;
+	export let onRecordStart: () => void = () => null;
+	export let onRecordStop: () => void = () => null;
+	export let onError: (err: string) => void = () => null;
+
+	let isRecording = false;
+	let recorder: Recorder;
+	let txt = "";
+	let warning = "";
+
+	async function onClick() {
+		try {
+			isRecording = !isRecording;
+			if (isRecording) {
+				onRecordStart();
+				await recorder.start();
+			} else {
+				onRecordStop();
+				txt = "";
+				updateModelLoading(false);
+				recorder.stop();
+			}
+		} catch (e) {
+			isRecording = false;
+			onRecordStop();
+			updateModelLoading(false);
+			if (e instanceof Error) {
+				switch (e.name) {
+					case "NotAllowedError": {
+						onError("Please allow access to your microphone & refresh the page");
+						break;
+					}
+					case "NotFoundError": {
+						onError("No microphone found on your device");
+						break;
+					}
+					default: {
+						onError(`${e.name}: ${e.message}`);
+						break;
+					}
+				}
+			} else {
+				onError(String(e));
+			}
+		}
+	}
+
+	function renderText(_txt: string) {
+		warning = "";
+		txt = _txt;
+		onError("");
+		updateModelLoading(false);
+	}
+
+	function renderWarning(_warning: string) {
+		warning = _warning;
+		onError("");
+		updateModelLoading(false);
+	}
+
+	onMount(() => {
+		recorder = new Recorder(model.id, apiToken, renderText, renderWarning, onError, updateModelLoading);
+	});
+
+	onDestroy(() => {
+		if (recorder) {
+			recorder.stop();
+		}
+	});
+</script>
+
+<button class="btn-widget {classNames}" on:click={onClick} type="button">
+	<div class="flex items-center {isRecording ? 'animate-pulse text-red-500' : ''}">
+		<IconMagicWand classNames="-ml-1 mr-1.5" />
+		<span>
+			{isRecording ? "Stop speech recognition" : "Realtime speech recognition"}
+		</span>
+	</div>
+</button>
+
+{#if isRecording}
+	<div class="relative top-0 left-0 my-2 inline-flex w-full items-center justify-center {!!warning && 'animate-pulse'}">
+		{#if warning}
+			<p class="opacity-50">{warning}</p>
+		{:else}
+			<p class="font-mono lowercase">{txt}</p>
+		{/if}
+	</div>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts
new file mode 100644
index 0000000000000000000000000000000000000000..39bed256c3e4909b1346db5b29505ad5b438449a
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts
@@ -0,0 +1,71 @@
+import { delay } from "$lib/utils/ViewUtils.js";
+
+export default class Recorder {
+	// see developers.google.com/web/updates/2016/01/mediarecorder
+	type: "audio" | "video" = "audio";
+	private stream?: MediaStream;
+	private mediaRecorder?: MediaRecorder;
+	private recordedBlobs: Blob[] = [];
+	public outputBlob?: Blob;
+
+	get desiredMimeType(): string {
+		return this.type === "video" ? "video/webm" : "audio/webm";
+	}
+	get mimeType(): string {
+		if (!this.mediaRecorder) {
+			throw new Error("MediaRecorder not initialized");
+		}
+		return this.mediaRecorder.mimeType;
+	}
+	async start(): Promise<void> {
+		this.recordedBlobs = [];
+
+		const constraints: MediaStreamConstraints = this.type === "video" ? { audio: true, video: true } : { audio: true };
+		this.stream = await navigator.mediaDevices.getUserMedia(constraints);
+		this.startRecording();
+	}
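+	// A 10 ms timeslice makes `ondataavailable` fire frequently, so stopRecording only
+	// has to wait briefly for the final chunk before assembling the output blob.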
+	private startRecording() {
+		if (!this.stream) {
+			throw new Error("Stream not initialized");
+		}
+		this.outputBlob = undefined;
+		this.mediaRecorder = new MediaRecorder(this.stream, {
+			mimeType: this.desiredMimeType,
+		});
+		this.mediaRecorder.onstop = this.handleStop.bind(this);
+		this.mediaRecorder.ondataavailable = this.handleDataAvailable.bind(this);
+		this.mediaRecorder.start(10); // timeslice in ms
+	}
+	handleStop(): void {}
+	handleDataAvailable(evt: BlobEvent): void {
+		if (evt.data && evt.data.size > 0) {
+			this.recordedBlobs.push(evt.data);
+		}
+	}
+	async stopRecording(): Promise<Blob> {
+		if (this.mediaRecorder) {
+			this.mediaRecorder.stop();
+		}
+		if (this.stream) {
+			for (const t of this.stream.getTracks()) {
+				t.stop(); // Stop stream.
+			}
+		}
+
+		// Handle the case where stopRecording is called before this.mediaRecorder is initialized.
+		if (!this.mediaRecorder) {
+			return new Blob(this.recordedBlobs);
+		}
+
+		await delay(30);
+		// Wait for the last blob in handleDataAvailable.
+		// Alternative: hook into `onstop` event.
+		const superBuffer = new Blob(this.recordedBlobs, {
+			type: this.mimeType,
+		});
+		this.outputBlob = superBuffer;
+		return superBuffer;
+	}
+}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..05de83da5d0a76caf498c9a7cda0fd1eb9b20448
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte
@@ -0,0 +1,67 @@
+<script lang="ts">
+	import { onDestroy, onMount } from "svelte";
+
+	import IconMicrophone from "$lib/components/Icons/IconMicrophone.svelte";
+
+	import Recorder from "./Recorder.js";
+
+	export let classNames = "";
+	export let onRecordStart: () => void = () => null;
+	export let onRecordStop: (blob: Blob) => void = () => null;
+	export let onError: (err: string) => void = () => null;
+
+	let isRecording = false;
+	let recorder: Recorder;
+
+	onMount(() => {
+		recorder = new Recorder();
+	});
+
+	onDestroy(() => {
+		if (recorder) {
+			recorder.stopRecording();
+		}
+	});
+
+	async function onClick() {
+		try {
+			isRecording = !isRecording;
+			if (isRecording) {
+				await recorder.start();
+				onRecordStart();
+			} else {
+				const blob = await recorder.stopRecording();
+				onRecordStop(blob);
+			}
+		} catch (e) {
+			isRecording = false;
+			if (e instanceof Error) {
+				switch (e.name) {
+					case "NotAllowedError": {
+						onError("Please allow access to your microphone & refresh the page");
+						break;
+					}
+					case "NotFoundError": {
+						onError("No microphone found on your device");
+						break;
+					}
+					default: {
+						onError(`Encountered error "${e.name}: ${e.message}"`);
+						break;
+					}
+				}
+			} else {
+				onError(String(e));
+			}
+		}
+	}
+</script>
+
+<button class="btn-widget {classNames}" on:click={onClick} type="button">
+	<div class="flex items-center {isRecording ? 'animate-pulse text-red-500' : ''}">
+		<IconMicrophone classNames="-ml-1 mr-1.5" />
+		<span>
+			{isRecording ? "Click to stop recording" : "Record from browser"}
+		</span>
+	</div>
+</button>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..322663467417a826f7237bd155a7d7e90b9b1f3b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte
@@ -0,0 +1,22 @@
+<script lang="ts">
+	import { onMount } from "svelte";
+
+	export let isLoading: boolean;
+	export let isDisabled = false;
+
+	let shortcutLabel = "";
+
+	onMount(() => {
+		const isMac = navigator.platform.includes("Mac");
+		shortcutLabel = isMac ? "⌘+Enter" : "Ctrl+Enter";
+	});
+</script>
+
+{#if !isDisabled}
+	<kbd
+		class="hidden rounded border border-gray-200 bg-gray-100 py-0.5 px-1.5 text-xs leading-none text-gray-700 dark:bg-gray-800 dark:text-gray-300 md:inline {isLoading
+			? 'opacity-40'
+			: 'opacity-70'}"
+		>{shortcutLabel}
+	</kbd>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..94be11f7b7e0062693401d72804c5ef57ba39010
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte
@@ -0,0 +1,17 @@
+<script lang="ts">
+	export let currentState: "error" | "loaded" | "unknown" | undefined;
+</script>
+
+<div class="blankslate">
+	<div class="subtitle text-xs text-gray-500">
+		<div class="loaded mt-2 {currentState !== 'loaded' ? 'hidden' : ''}">
+			This model is currently loaded and running on the Inference API.
+		</div>
+		<div class="error mt-2 {currentState !== 'error' ? 'hidden' : ''}">
+			⚠️ This model could not be loaded by the inference API. ⚠️
+		</div>
+		<div class="unknown mt-2 {currentState !== 'unknown' ? 'hidden' : ''}">
+			This model can be loaded on the Inference API on-demand.
+		</div>
+	</div>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6fc03b2b178d8a17d5c68c3dbd38726338542cd7
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte
@@ -0,0 +1,37 @@
+<script lang="ts">
+	import IconSpin from "$lib/components/Icons/IconSpin.svelte";
+
+	export let classNames = "";
+	export let isDisabled = false;
+	export let isLoading: boolean;
+	export let label = "Compute";
+	export let onClick: () => void;
+
+	function onKeyDown(e: KeyboardEvent) {
+		if (isLoading || isDisabled) {
+			return;
+		}
+		// run inference on cmd+Enter
+		if (e.code === "Enter" && (e.metaKey || e.ctrlKey)) {
+			e.preventDefault();
+			onClick();
+		}
+	}
+</script>
+
+<svelte:window on:keydown={onKeyDown} />
+
+{#if !isDisabled}
+	<button
+		class="btn-widget h-10 w-24 px-5 {classNames}"
+		disabled={isDisabled || isLoading}
+		on:click|preventDefault={onClick}
+		type="submit"
+	>
+		{#if isLoading}
+			<IconSpin classNames="text-gray-600 animate-spin" />
+		{:else}
+			{label}
+		{/if}
+	</button>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..765ef6d23494385fba3a0cdfe749b6b5c6131b94
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte
@@ -0,0 +1,112 @@
+<script lang="ts">
+	import type { HighlightCoordinates } from "../types.js";
+
+	import { onMount, tick } from "svelte";
+
+	import { scrollToMax } from "../../../../utils/ViewUtils.js";
+	import IconRow from "$lib/components/Icons/IconRow.svelte";
+
+	export let onChange: (table: (string | number)[][]) => void;
+	export let highlighted: HighlightCoordinates;
+	export let table: (string | number)[][] = [[]];
+	export let canAddRow = true;
+	export let canAddCol = true;
+	export let isLoading = false;
+	export let isDisabled = false;
+
+	let initialTable: (string | number)[][] = [[]];
+	let tableContainerEl: HTMLElement;
+
+	onMount(() => {
+		initialTable = table.map((row) => row.map((cell) => cell));
+	});
+
+	async function addCol() {
+		const updatedTable = table.map((row, rowIndex) => [...row, rowIndex === 0 ? `Header ${table[0].length + 1}` : ""]);
+		onChange(updatedTable);
+		await scrollTableToRight();
+	}
+
+	export async function scrollTableToRight(): Promise<void> {
+		await tick();
+		scrollToMax(tableContainerEl, "x");
+	}
+
+	function addRow() {
+		const updatedTable = [...table, Array(table[0].length).fill("")];
+		onChange(updatedTable);
+	}
+
+	function editCell(e: Event, [x, y]: [number, number]) {
+		const value = (e.target as HTMLElement)?.innerText;
+
+		const updatedTable = table.map((row, rowIndex) =>
+			rowIndex === y ? row.map((col, colIndex) => (colIndex === x ? value : col)) : row
+		);
+		onChange(updatedTable);
+	}
+
+	function onKeyDown(e: KeyboardEvent) {
+		if (e.code === "Enter") {
+			(e.target as HTMLElement)?.blur();
+		}
+	}
+
+	function resetTable() {
+		const updatedTable = initialTable;
+		onChange(updatedTable);
+	}
+</script>
+
+<div class="overflow-auto" bind:this={tableContainerEl}>
+	{#if table.length > 1}
+		<table class="table-question-answering">
+			<thead>
+				<tr>
+					{#each table[0] as header, x}
+						<th
+							contenteditable={canAddCol && !isLoading && !isDisabled}
+							class="h-6 border-2 border-gray-100"
+							on:keydown={onKeyDown}
+							on:input={(e) => editCell(e, [x, 0])}
+						>
+							{header}
+						</th>
+					{/each}
+				</tr>
+			</thead>
+			<tbody>
+				{#each table.slice(1) as row, y}
+					<tr class={highlighted[`${y}`] ?? "bg-white"}>
+						{#each row as cell, x}
+							<td
+								class={(highlighted[`${y}-${x}`] ?? "border-gray-100") + " h-6 border-2"}
+								contenteditable={!isLoading && !isDisabled}
+								on:keydown={onKeyDown}
+								on:input={(e) => editCell(e, [x, y + 1])}>{cell}</td
+							>
+						{/each}
+					</tr>
+				{/each}
+			</tbody>
+		</table>
+	{/if}
+</div>
+
+<div class="mb-1 flex flex-wrap">
+	{#if canAddRow}
+		<button class="btn-widget mt-2 mr-1.5 flex-1 lg:flex-none" on:click={addRow} type="button">
+			<IconRow classNames="mr-2" />
+			Add row
+		</button>
+	{/if}
+	{#if canAddCol}
+		<button class="btn-widget mt-2 flex-1 lg:mr-1.5 lg:flex-none" on:click={addCol} type="button">
+			<IconRow classNames="transform rotate-90 mr-1" />
+			Add col
+		</button>
+	{/if}
+	<button class="btn-widget mt-2 flex-1 lg:ml-auto lg:flex-none" on:click={resetTable} type="button">
+		Reset table
+	</button>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..50665b478649c00a98a9d610b1437ecd21be2679
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte
@@ -0,0 +1,20 @@
+<script lang="ts">
+	import WidgetLabel from "../WidgetLabel/WidgetLabel.svelte";
+
+	export let label: string | undefined = undefined;
+	export let placeholder: string = "Your sentence here...";
+	export let isDisabled = false;
+	export let value: string;
+</script>
+
+<WidgetLabel {label}>
+	<svelte:fragment slot="after">
+		<input
+			bind:value
+			class="{label ? 'mt-1.5' : ''} form-input-alt block w-full"
+			placeholder={isDisabled ? "" : placeholder}
+			disabled={isDisabled}
+			type="text"
+		/>
+	</svelte:fragment>
+</WidgetLabel>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..2f56f731432bd3194e230ec6e6707f59a3bd80a0
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte
@@ -0,0 +1,106 @@
+<script lang="ts">
+	import { tick } from "svelte";
+
+	import { delay } from "../../../../utils/ViewUtils.js";
+	import WidgetLabel from "../WidgetLabel/WidgetLabel.svelte";
+
+	export let label: string = "";
+	export let placeholder: string = "Your sentence here...";
+	export let value: string;
+	export let isLoading = false;
+	export let isDisabled = false;
+	export let size: "small" | "big" = "small";
+
+	let containerSpanEl: HTMLSpanElement;
+	const typingEffectSpeedMs = 12;
+	const classNamesInput = "whitespace-pre-wrap inline font-normal text-black dark:text-white";
+	const classNamesOutput = "whitespace-pre-wrap inline text-blue-600 dark:text-blue-400";
+
+	export async function renderTypingEffect(outputTxt: string): Promise<void> {
+		const spanEl = document.createElement("span");
+		spanEl.contentEditable = "true";
+		spanEl.className = classNamesOutput;
+		containerSpanEl?.appendChild(spanEl);
+		await tick();
+		// fix Chrome bug that adds `<br>` elements inside the contenteditable element
+		const brElts = containerSpanEl?.querySelectorAll("br");
+		for (const brEl of brElts) {
+			brEl.remove();
+		}
+		await tick();
+		// split into individual characters (whitespace included) so newlines \n render correctly
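+		// e.g. "a\nb".split(/(\s|.)/g) -> ["", "a", "", "\n", "", "b", ""]; the empty strings render as nothing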
+		for (const char of outputTxt.split(/(\s|.)/g)) {
+			await delay(typingEffectSpeedMs);
+			spanEl.textContent += char;
+			moveCaretToEnd();
+		}
+		updateInnerTextValue();
+	}
+
+	function moveCaretToEnd() {
+		containerSpanEl?.focus();
+		if (containerSpanEl) {
+			const range = document.createRange();
+			range.selectNodeContents(containerSpanEl);
+			range.collapse(false);
+			const selection = window.getSelection();
+			selection?.removeAllRanges();
+			selection?.addRange(range);
+		}
+	}
+
+	// handle Firefox contenteditable paste bug
+	function handlePaste(e: ClipboardEvent) {
+		if (isLoading) {
+			return e.preventDefault();
+		}
+		const copiedTxt = e.clipboardData?.getData("text/plain");
+		const selection = window.getSelection();
+		if (selection?.rangeCount && !!copiedTxt?.length) {
+			const range = selection.getRangeAt(0);
+			range.deleteContents();
+			const spanEl = document.createElement("span");
+			spanEl.contentEditable = "true";
+			spanEl.className = classNamesInput;
+			spanEl.textContent = copiedTxt;
+			range.insertNode(spanEl);
+		}
+		window.getSelection()?.collapseToEnd();
+		updateInnerTextValue();
+	}
+
+	function updateInnerTextValue() {
+		value = containerSpanEl?.textContent ?? "";
+	}
+
+	export function setValue(text: string): void {
+		containerSpanEl.textContent = text;
+		updateInnerTextValue();
+	}
+</script>
+
+<WidgetLabel {label}>
+	<svelte:fragment slot="after">
+		<!-- `whitespace-pre-wrap inline-block` are needed to get correct newlines from `el.textContent` on Chrome -->
+		<span
+			class="{label ? 'mt-1.5' : ''} block w-full resize-y overflow-auto py-2 px-3 {size === 'small'
+				? 'min-h-[42px]'
+				: 'min-h-[144px]'} inline-block max-h-[500px] whitespace-pre-wrap rounded-lg border border-gray-200 shadow-inner outline-none focus:shadow-inner focus:ring focus:ring-blue-200 dark:bg-gray-925"
+			role="textbox"
+			contenteditable={!isLoading && !isDisabled}
+			style="--placeholder: '{isDisabled ? '' : placeholder}'"
+			spellcheck="false"
+			dir="auto"
+			bind:this={containerSpanEl}
+			on:paste|preventDefault={handlePaste}
+			on:input={updateInnerTextValue}
+		/>
+	</svelte:fragment>
+</WidgetLabel>
+
+<style>
+	span[contenteditable]:empty::before {
+		content: var(--placeholder);
+		color: rgba(156, 163, 175);
+	}
+</style>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..2d41e1ac523c03e04ad4813df90a7994afd536d4
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte
@@ -0,0 +1,35 @@
+<script lang="ts">
+	import { onDestroy } from "svelte";
+
+	export let isDisabled = false;
+
+	let counterSeconds = 0.0;
+	let interval: ReturnType<typeof setInterval>;
+	let shouldDisplay = false;
+
+	// human-readable timer value, in seconds with one decimal
+	$: counterHuman = counterSeconds.toLocaleString(undefined, {
+		minimumFractionDigits: 1,
+	});
+
+	export function start(): void {
+		// reset timer for new run
+		stop();
+		counterSeconds = 0.0;
+		shouldDisplay = true;
+		// tick every 100 ms
+		interval = setInterval(() => (counterSeconds += 0.1), 100);
+	}
+
+	export function stop(): void {
+		if (interval) {
+			clearInterval(interval);
+		}
+	}
+
+	onDestroy(() => stop());
+</script>
+
+{#if shouldDisplay && !isDisabled}
+	<span class="font-mono text-xs text-gray-500">{counterHuman}</span>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..4d171a03b9e75c0b226a80a879e4b119d0833415
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte
@@ -0,0 +1,159 @@
+<script lang="ts">
+	import type { WidgetProps, ModelLoadInfo, ExampleRunOpts } from "../types.js";
+	import type { WidgetExample, WidgetExampleAttribute } from "../WidgetExample.js";
+
+	type TWidgetExample = $$Generic<WidgetExample>;
+
+	import { onMount } from "svelte";
+
+	import IconCross from "$lib/components/Icons/IconCross.svelte";
+	import WidgetInputSamples from "../WidgetInputSamples/WidgetInputSamples.svelte";
+	import WidgetInputSamplesGroup from "../WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte";
+	import WidgetFooter from "../WidgetFooter/WidgetFooter.svelte";
+	import WidgetHeader from "../WidgetHeader/WidgetHeader.svelte";
+	import WidgetInfo from "../WidgetInfo/WidgetInfo.svelte";
+	import WidgetModelLoading from "../WidgetModelLoading/WidgetModelLoading.svelte";
+	import {
+		getModelLoadInfo,
+		getQueryParamVal,
+		getWidgetExample,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { modelLoadStates } from "../../stores.js";
+	import { InferenceDisplayability } from "../../../../interfaces/InferenceDisplayability.js";
+
+	export let apiUrl: string;
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let computeTime: string;
+	export let error: string;
+	export let isLoading = false;
+	export let model: WidgetProps["model"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	export let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	export let noTitle = false;
+	export let outputJson: string;
+	export let applyInputSample: (sample: TWidgetExample, opts?: ExampleRunOpts) => void = () => {};
+	export let validateExample: (sample: WidgetExample) => sample is TWidgetExample;
+	export let exampleQueryParams: WidgetExampleAttribute[] = [];
+
+	let isDisabled = model.inference !== InferenceDisplayability.Yes && model.pipeline_tag !== "reinforcement-learning";
+	let isMaximized = false;
+	let modelLoadInfo: ModelLoadInfo | undefined = undefined;
+	let selectedInputGroup: string;
+	let modelTooBig = false;
+
+	interface ExamplesGroup {
+		group: string;
+		inputSamples: TWidgetExample[];
+	}
+
+	const allInputSamples = (model.widgetData ?? [])
+		.filter(validateExample)
+		.sort((sample1, sample2) => (sample2.example_title ? 1 : 0) - (sample1.example_title ? 1 : 0))
+		.map((sample, idx) => ({
+			example_title: `Example ${idx + 1}`,
+			group: "Group 1",
+			...sample,
+		}));
+	let inputSamples = !isDisabled ? allInputSamples : allInputSamples.filter((sample) => sample.output !== undefined);
+	let inputGroups = getExamplesGroups();
+
+	$: selectedInputSamples =
+		inputGroups.length === 1 ? inputGroups[0] : inputGroups.find(({ group }) => group === selectedInputGroup);
+
+	function getExamplesGroups(): ExamplesGroup[] {
+		const inputGroups: ExamplesGroup[] = [];
+		for (const inputSample of inputSamples) {
+			let examplesGroup = inputGroups.find(({ group }) => group === inputSample.group);
+			if (!examplesGroup) {
+				examplesGroup = { group: inputSample.group as string, inputSamples: [] };
+				inputGroups.push(examplesGroup);
+			}
+			examplesGroup.inputSamples.push(inputSample);
+		}
+		return inputGroups;
+	}
+
+	onMount(() => {
+		(async () => {
+			modelLoadInfo = await getModelLoadInfo(apiUrl, model.id, includeCredentials);
+			$modelLoadStates[model.id] = modelLoadInfo;
+			modelTooBig = modelLoadInfo?.state === "TooBig";
+
+			if (modelTooBig) {
+				// disable the widget
+				isDisabled = true;
+				inputSamples = allInputSamples.filter((sample) => sample.output !== undefined);
+				inputGroups = getExamplesGroups();
+			}
+
+			const exampleFromQueryParams = {} as TWidgetExample;
+			for (const key of exampleQueryParams) {
+				const val = getQueryParamVal(key);
+				if (val) {
+					// @ts-expect-error complicated type
+					exampleFromQueryParams[key] = val;
+				}
+			}
+			if (Object.keys(exampleFromQueryParams).length) {
+				// run widget example from query params
+				applyInputSample(exampleFromQueryParams);
+			} else {
+				// run random widget example
+				const example = getWidgetExample<TWidgetExample>(model, validateExample);
+				if (callApiOnMount && example) {
+					applyInputSample(example, { inferenceOpts: { isOnLoadCall: true } });
+				}
+			}
+		})();
+	});
+
+	function onClickMaximizeBtn() {
+		isMaximized = !isMaximized;
+	}
+</script>
+
+{#if isDisabled && !inputSamples.length}
+	<WidgetHeader pipeline={model.pipeline_tag} noTitle={true} />
+	<WidgetInfo {model} {computeTime} {error} {modelLoadInfo} {modelTooBig} />
+{:else}
+	<div
+		class="flex w-full max-w-full flex-col
+		 {isMaximized ? 'fixed inset-0 z-20 bg-white p-12' : ''}
+		 {!modelLoadInfo ? 'hidden' : ''}"
+	>
+		{#if isMaximized}
+			<button class="absolute right-12 top-6" on:click={onClickMaximizeBtn}>
+				<IconCross classNames="text-xl text-gray-500 hover:text-black" />
+			</button>
+		{/if}
+		<WidgetHeader {noTitle} pipeline={model.pipeline_tag} {isDisabled}>
+			{#if !!inputGroups.length}
+				<div class="ml-auto flex gap-x-1">
+					<!-- Show the group selector when there is more than one group of examples -->
+					{#if inputGroups.length > 1}
+						<WidgetInputSamplesGroup
+							bind:selectedInputGroup
+							{isLoading}
+							inputGroups={inputGroups.map(({ group }) => group)}
+						/>
+					{/if}
+					<WidgetInputSamples
+						classNames={!selectedInputSamples ? "opacity-50 pointer-events-none" : ""}
+						{isLoading}
+						inputSamples={selectedInputSamples?.inputSamples ?? []}
+						{applyInputSample}
+					/>
+				</div>
+			{/if}
+		</WidgetHeader>
+		<slot name="top" {isDisabled} />
+		<WidgetInfo {model} {computeTime} {error} {modelLoadInfo} {modelTooBig} />
+		{#if modelLoading.isLoading}
+			<WidgetModelLoading estimatedTime={modelLoading.estimatedTime} />
+		{/if}
+		<slot name="bottom" />
+		<WidgetFooter {onClickMaximizeBtn} {outputJson} {isDisabled} />
+	</div>
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1ec7359bff5ade2ad5e14096d76175f3f86aaf14
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts
@@ -0,0 +1,37 @@
+/**
+ * Color palette for obj-det & img-seg widgets
+ */
+export const COLORS = [
+	{
+		color: "red",
+		hex: "#f87171",
+	},
+	{
+		color: "green",
+		hex: "#4ade80",
+	},
+	{
+		color: "yellow",
+		hex: "#facc15",
+	},
+	{
+		color: "blue",
+		hex: "#60a5fa",
+	},
+	{
+		color: "orange",
+		hex: "#fb923c",
+	},
+	{
+		color: "purple",
+		hex: "#c084fc",
+	},
+	{
+		color: "cyan",
+		hex: "#22d3ee",
+	},
+	{
+		color: "lime",
+		hex: "#a3e635",
+	},
+] as const;
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d34113757f3033b1d903a8f2a3ec0fa6e77c6ee5
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts
@@ -0,0 +1,251 @@
+import type { ModelData } from "$lib/interfaces/Types.js";
+import { randomItem, parseJSON } from "../../../utils/ViewUtils.js";
+import type { WidgetExample, WidgetExampleAttribute } from "./WidgetExample.js";
+import type { ModelLoadInfo, TableData } from "./types.js";
+import { LoadState } from "./types.js";
+
+const KEYS_TEXT: WidgetExampleAttribute[] = ["text", "context", "candidate_labels"];
+const KEYS_TABLE: WidgetExampleAttribute[] = ["table", "structured_data"];
+type QueryParamVal = string | null | boolean | (string | number)[][];
+
+export function getQueryParamVal(key: WidgetExampleAttribute): QueryParamVal {
+	const searchParams = new URL(window.location.href).searchParams;
+	const value = searchParams.get(key);
+	if (KEYS_TEXT.includes(key)) {
+		return value;
+	} else if (KEYS_TABLE.includes(key)) {
+		const table = convertDataToTable((parseJSON(value) as TableData) ?? {});
+		return table;
+	} else if (key === "multi_class") {
+		return value === "true";
+	}
+	return value;
+}
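+
+// Example (hypothetical URL): for `?text=hello&multi_class=true`,
+// getQueryParamVal("text") returns "hello" and getQueryParamVal("multi_class") returns true.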
+
+export function getWidgetExample<TWidgetExample extends WidgetExample>(
+	model: ModelData,
+	validateExample: (sample: WidgetExample) => sample is TWidgetExample
+): TWidgetExample | undefined {
+	const validExamples = model.widgetData?.filter(
+		(sample): sample is TWidgetExample => sample && validateExample(sample)
+	);
+	return validExamples?.length ? randomItem(validExamples) : undefined;
+}
+
+// Update current url search params, keeping existing keys intact.
+export function updateUrl(obj: Partial<Record<WidgetExampleAttribute, string | undefined>>): void {
+	if (typeof window === "undefined") {
+		return;
+	}
+
+	const sp = new URL(window.location.href).searchParams;
+	for (const [k, v] of Object.entries(obj)) {
+		if (v === undefined) {
+			sp.delete(k);
+		} else {
+			sp.set(k, v);
+		}
+	}
+	const path = `${window.location.pathname}?${sp.toString()}`;
+	window.history.replaceState(null, "", path);
+}
+
+// Run through our own proxy to bypass CORS:
+function proxify(url: string): string {
+	return url.startsWith(`http://localhost`) || new URL(url).host === window.location.host
+		? url
+		: `https://widgets.hf.co/proxy?url=${url}`;
+}
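+
+// e.g. proxify("https://example.com/audio.wav") -> "https://widgets.hf.co/proxy?url=https://example.com/audio.wav";
+// localhost and same-host URLs pass through unchanged.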
+
+// Get BLOB from a given URL after proxifying the URL
+export async function getBlobFromUrl(url: string): Promise<Blob> {
+	const proxiedUrl = proxify(url);
+	const res = await fetch(proxiedUrl);
+	const blob = await res.blob();
+	return blob;
+}
+
+interface Success<T> {
+	computeTime: string;
+	output: T;
+	outputJson: string;
+	response: Response;
+	status: "success";
+}
+
+interface LoadingModel {
+	error: string;
+	estimatedTime: number;
+	status: "loading-model";
+}
+
+interface Error {
+	error: string;
+	status: "error";
+}
+
+interface CacheNotFound {
+	status: "cache not found";
+}
+
+type Result<T> = Success<T> | LoadingModel | Error | CacheNotFound;
+
+export async function callInferenceApi<T>(
+	url: string,
+	repoId: string,
+	requestBody: Record<string, unknown>,
+	apiToken = "",
+	outputParsingFn: (x: unknown) => T,
+	waitForModel = false, // If true, the server will only respond once the model has been loaded on the Inference API.
+	includeCredentials = false,
+	isOnLoadCall = false, // If true, the server only answers from cache and does nothing on a cache miss.
+	useCache = true
+): Promise<Result<T>> {
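+	// If the request carries a File upload, forward its MIME type; otherwise default to JSON.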
+	const contentType =
+		"file" in requestBody &&
+		requestBody["file"] &&
+		typeof requestBody["file"] === "object" &&
+		"type" in requestBody["file"] &&
+		typeof requestBody["file"]["type"] === "string"
+			? requestBody["file"]["type"]
+			: "application/json";
+
+	const headers = new Headers();
+	headers.set("Content-Type", contentType);
+	if (apiToken) {
+		headers.set("Authorization", `Bearer ${apiToken}`);
+	}
+	if (waitForModel) {
+		headers.set("X-Wait-For-Model", "true");
+	}
+	if (useCache === false) {
+		headers.set("X-Use-Cache", "false");
+	}
+	if (isOnLoadCall) {
+		headers.set("X-Load-Model", "0");
+	}
+
+	const reqBody: File | string =
+		"file" in requestBody && requestBody["file"] instanceof File ? requestBody.file : JSON.stringify(requestBody);
+
+	const response = await fetch(`${url}/models/${repoId}`, {
+		method: "POST",
+		body: reqBody,
+		headers,
+		credentials: includeCredentials ? "include" : "same-origin",
+	});
+
+	if (response.ok) {
+		// Success
+		const computeTime = response.headers.has("x-compute-time")
+			? `${response.headers.get("x-compute-time")} s`
+			: `cached`;
+		const isMediaContent = (response.headers.get("content-type")?.search(/^(?:audio|image)/i) ?? -1) !== -1;
+
+		const body = !isMediaContent ? await response.json() : await response.blob();
+
+		try {
+			const output = outputParsingFn(body);
+			const outputJson = !isMediaContent ? JSON.stringify(body, null, 2) : "";
+			return { computeTime, output, outputJson, response, status: "success" };
+		} catch (e) {
+			if (isOnLoadCall && body.error === "not loaded yet") {
+				return { status: "cache not found" };
+			}
+			// Invalid output
+			const error = `API Implementation Error: ${String(e).replace(/^Error: /, "")}`;
+			return { error, status: "error" };
+		}
+	} else {
+		// Error
+		const bodyText = await response.text();
+		const body = parseJSON<Record<string, unknown>>(bodyText) ?? {};
+
+		if (
+			body["error"] &&
+			response.status === 503 &&
+			body["estimated_time"] !== null &&
+			body["estimated_time"] !== undefined
+		) {
+			// Model needs loading
+			return { error: String(body["error"]), estimatedTime: +body["estimated_time"], status: "loading-model" };
+		} else {
+			// Other errors
+			const { status, statusText } = response;
+			return {
+				error: String(body["error"]) || String(body["traceback"]) || `${status} ${statusText}`,
+				status: "error",
+			};
+		}
+	}
+}
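+
+// Usage sketch (hypothetical model id and parser):
+//   const res = await callInferenceApi(apiUrl, "user/model", { inputs: "Hello" }, apiToken, (x) => x as string);
+//   if (res.status === "success") console.log(res.output);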
+
+export async function getModelLoadInfo(
+	url: string,
+	repoId: string,
+	includeCredentials = false
+): Promise<ModelLoadInfo> {
+	const response = await fetch(`${url}/status/${repoId}`, {
+		credentials: includeCredentials ? "include" : "same-origin",
+	});
+	const output = await response.json();
+	if (response.ok && typeof output === "object" && output.loaded !== undefined) {
+		// eslint-disable-next-line @typescript-eslint/naming-convention
+		const { state, compute_type } = output;
+		return { compute_type, state };
+	} else {
+		console.warn(response.status, output.error);
+		return { state: LoadState.Error };
+	}
+}
+
+// Extend Inference API requestBody with user supplied Inference API parameters
+export function addInferenceParameters(requestBody: Record<string, unknown>, model: ModelData): void {
+	const inference = model?.cardData?.inference;
+	if (typeof inference === "object") {
+		const inferenceParameters = inference?.parameters;
+		if (inferenceParameters) {
+			if (requestBody.parameters) {
+				requestBody.parameters = { ...requestBody.parameters, ...inferenceParameters };
+			} else {
+				requestBody.parameters = inferenceParameters;
+			}
+		}
+	}
+}
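+
+// For instance, a model card (hypothetical) declaring:
+//   inference:
+//     parameters:
+//       temperature: 0.7
+// results in { temperature: 0.7 } being merged into requestBody.parameters.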
+
+/**
+ * Converts a table from [[Header0, Header1, Header2], [Column0Val0, Column1Val0, Column2Val0], ...]
+ * to {Header0: [Column0Val0, ...], Header1: [Column1Val0, ...], Header2: [Column2Val0, ...]}
+ */
+export function convertTableToData(table: (string | number)[][]): TableData {
+	return Object.fromEntries(
+		table[0].map((cell, x) => {
+			return [
+				cell,
+				table
+					.slice(1)
+					.flat()
+					.filter((_, i) => i % table[0].length === x)
+					.map((v) => String(v)), // some models can only handle strings (no numbers)
+			];
+		})
+	);
+}
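+
+// e.g. convertTableToData([["a", "b"], [1, 2], [3, 4]]) -> { a: ["1", "3"], b: ["2", "4"] }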
+
+/**
+ * Converts data from {Header0: [ColumnVal0, ...], Header1: [Column1Val0, ...], Header2: [Column2Val0, ...]}
+ * to [[Header0, Header1, Header2], [Column0Val0, Column1Val0, Column2Val0], ...]
+ */
+export function convertDataToTable(data: TableData): (string | number)[][] {
+	const dataArray = Object.entries(data); // [header, cell[]][]
+	const nbCols = dataArray.length;
+	const nbRows = (dataArray[0]?.[1]?.length ?? 0) + 1;
+	return Array(nbRows)
+		.fill("")
+		.map((_, y) =>
+			Array(nbCols)
+				.fill("")
+				.map((__, x) => (y === 0 ? dataArray[x][0] : dataArray[x][1][y - 1]))
+		);
+}
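+
+// e.g. convertDataToTable({ a: ["1", "3"], b: ["2", "4"] }) -> [["a", "b"], ["1", "2"], ["3", "4"]]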
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts
new file mode 100644
index 0000000000000000000000000000000000000000..06224afdde3e84fc3fe654d9e52bf71511e639f9
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts
@@ -0,0 +1,87 @@
+import type {
+	WidgetExample,
+	WidgetExampleAssetAndPromptInput,
+	WidgetExampleAssetAndTextInput,
+	WidgetExampleAssetAndZeroShotInput,
+	WidgetExampleAssetInput,
+	WidgetExampleSentenceSimilarityInput,
+	WidgetExampleStructuredDataInput,
+	WidgetExampleTableDataInput,
+	WidgetExampleTextAndContextInput,
+	WidgetExampleTextAndTableInput,
+	WidgetExampleTextInput,
+	WidgetExampleZeroShotTextInput,
+} from "./WidgetExample.js";
+
+export function isTextInput<TOutput>(sample: WidgetExample<TOutput>): sample is WidgetExampleTextInput<TOutput> {
+	return "text" in sample;
+}
+
+export function isTextAndContextInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleTextAndContextInput<TOutput> {
+	return isTextInput(sample) && "context" in sample;
+}
+
+export function isAssetInput<TOutput>(sample: WidgetExample<TOutput>): sample is WidgetExampleAssetInput<TOutput> {
+	return "src" in sample;
+}
+
+export function isAssetAndPromptInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleAssetAndPromptInput<TOutput> {
+	return isAssetInput(sample) && "prompt" in sample && typeof sample.prompt === "string";
+}
+
+export function isAssetAndTextInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleAssetAndTextInput<TOutput> {
+	return isAssetInput(sample) && isTextInput(sample);
+}
+
+export function isStructuredDataInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleStructuredDataInput<TOutput> {
+	return "structured_data" in sample;
+}
+
+export function isTableDataInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleTableDataInput<TOutput> {
+	return "table" in sample;
+}
+
+function _isZeroShotTextInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is Omit<WidgetExampleZeroShotTextInput<TOutput>, "text"> {
+	return "candidate_labels" in sample && "multi_class" in sample;
+}
+
+export function isZeroShotTextInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleZeroShotTextInput<TOutput> {
+	return isTextInput(sample) && _isZeroShotTextInput(sample);
+}
+
+export function isSentenceSimilarityInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleSentenceSimilarityInput<TOutput> {
+	return "source_sentence" in sample && "sentences" in sample;
+}
+
+export function isTextAndTableInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleTextAndTableInput<TOutput> {
+	return (
+		isTextInput(sample) &&
+		"table" in sample &&
+		Array.isArray(sample.table) &&
+		sample.table.every((r) => Array.isArray(r) && r.every((c) => typeof c === "string" || typeof c === "number"))
+	);
+}
+
+export function isAssetAndZeroShotInput<TOutput>(
+	sample: WidgetExample<TOutput>
+): sample is WidgetExampleAssetAndZeroShotInput<TOutput> {
+	return isAssetInput(sample) && _isZeroShotTextInput(sample);
+}
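+
+// These guards narrow a generic WidgetExample so each widget can validate its samples; e.g.
+// (hypothetical) `model.widgetData?.filter(isTextInput)` keeps only examples that carry a `text` field.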
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts
new file mode 100644
index 0000000000000000000000000000000000000000..66b092a2c5b179f01d772b8f3768e2bd795a734c
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts
@@ -0,0 +1,35 @@
+import type {
+	WidgetExampleOutputLabels,
+	WidgetExampleOutputAnswerScore,
+	WidgetExampleOutputText,
+	WidgetExampleOutputUrl,
+} from "./WidgetExample.js";
+
+export function isValidOutputLabels(arg: unknown): arg is WidgetExampleOutputLabels {
+	return Array.isArray(arg) && arg.every((x) => typeof x.label === "string" && typeof x.score === "number");
+}
+
+export function isValidOutputAnswerScore(arg: unknown): arg is WidgetExampleOutputAnswerScore {
+	return (
+		!!arg &&
+		typeof arg === "object" &&
+		"answer" in arg &&
+		typeof arg["answer"] === "string" &&
+		"score" in arg &&
+		typeof arg["score"] === "number"
+	);
+}
+
+export function isValidOutputText(arg: unknown): arg is WidgetExampleOutputText {
+	return !!arg && typeof arg === "object" && "text" in arg && typeof arg["text"] === "string";
+}
+
+export function isValidOutputUrl(arg: unknown): arg is WidgetExampleOutputUrl {
+	return (
+		!!arg &&
+		typeof arg === "object" &&
+		"url" in arg &&
+		typeof arg["url"] === "string" &&
+		arg["url"].startsWith("https://")
+	);
+}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..70b6f4b41e4ff3a40f3172b61dcbbeb9e6bf06f3
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts
@@ -0,0 +1,68 @@
+import type { ModelData } from "../../../interfaces/Types.js";
+import type { WidgetExampleOutput } from "./WidgetExample.js";
+
+export interface WidgetProps {
+	apiToken?: string;
+	apiUrl: string;
+	callApiOnMount: boolean;
+	model: ModelData;
+	noTitle: boolean;
+	shouldUpdateUrl: boolean;
+	includeCredentials: boolean;
+	isLoggedIn?: boolean;
+}
+
+export interface InferenceRunOpts<TOutput = WidgetExampleOutput> {
+	withModelLoading?: boolean;
+	isOnLoadCall?: boolean;
+	useCache?: boolean;
+	exampleOutput?: TOutput;
+}
+
+export interface ExampleRunOpts {
+	isPreview?: boolean;
+	inferenceOpts?: InferenceRunOpts;
+}
+
+export enum LoadState {
+	Loadable = "Loadable",
+	Loaded = "Loaded",
+	TooBig = "TooBig",
+	Error = "error",
+}
+
+export enum ComputeType {
+	CPU = "cpu",
+	GPU = "gpu",
+}
+
+export interface ModelLoadInfo {
+	state: LoadState;
+	compute_type?: ComputeType;
+}
+
+export type TableData = Record<string, (string | number)[]>;
+
+export type HighlightCoordinates = Record<string, string>;
+
+interface Box {
+	xmin: number;
+	ymin: number;
+	xmax: number;
+	ymax: number;
+}
+
+export interface DetectedObject {
+	box: Box;
+	label: string;
+	score: number;
+	color?: string;
+}
+export interface ImageSegment {
+	label: string;
+	score: number;
+	mask: string;
+	color?: string;
+	imgData?: ImageData;
+	bitmap?: ImageBitmap;
+}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/stores.ts b/packages/widgets/src/lib/components/InferenceWidget/stores.ts
new file mode 100644
index 0000000000000000000000000000000000000000..a9ed4f27e523d54cb00135f8e7bca2fad6fd5c7e
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/stores.ts
@@ -0,0 +1,4 @@
+import { writable } from "svelte/store";
+import type { ModelLoadInfo } from "./shared/types.js";
+
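+/** Inference API load info per model id, shared by all widget instances on the page. */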
+export const modelLoadStates = writable<Record<string, ModelLoadInfo>>({});
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d75cea4f8ec57c40efc37022475cd96156703b9b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte
@@ -0,0 +1,202 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExample,
+		WidgetExampleAssetInput,
+		WidgetExampleOutputLabels,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetAudioTrack from "../../shared/WidgetAudioTrack/WidgetAudioTrack.svelte";
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetRecorder from "../../shared/WidgetRecorder/WidgetRecorder.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputLabels } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let file: Blob | File | null = null;
+	let filename: string = "";
+	let fileUrl: string;
+	let isLoading = false;
+	let isRecording = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+	let selectedSampleUrl = "";
+	let warning: string = "";
+
+	function onRecordStart() {
+		file = null;
+		filename = "";
+		fileUrl = "";
+		isRecording = true;
+	}
+
+	function onRecordError(err: string) {
+		error = err;
+	}
+
+	function onSelectFile(updatedFile: Blob | File) {
+		isRecording = false;
+		selectedSampleUrl = "";
+
+		if (updatedFile.size !== 0) {
+			const date = new Date();
+			const time = date.toLocaleTimeString("en-US");
+			filename = "name" in updatedFile ? updatedFile.name : `Audio recorded from browser [${time}]`;
+			file = updatedFile;
+			fileUrl = URL.createObjectURL(file);
+		}
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts<WidgetExampleOutputLabels> = {}) {
+		if (exampleOutput) {
+			output = exampleOutput;
+			outputJson = "";
+			return;
+		}
+
+		if (!file && !selectedSampleUrl) {
+			error = "You must select or record an audio file";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (!file && selectedSampleUrl) {
+			file = await getBlobFromUrl(selectedSampleUrl);
+		}
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No classes were detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): Array<{ label: string; score: number }> {
+		if (isValidOutputLabels(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<label: string, score:number>");
+	}
+
+	function applyInputSample(sample: WidgetExampleAssetInput<WidgetExampleOutputLabels>, opts: ExampleRunOpts = {}) {
+		filename = sample.example_title!;
+		fileUrl = sample.src;
+
+		if (opts.isPreview) {
+			if (isValidOutputLabels(sample.output)) {
+				output = sample.output;
+				outputJson = "";
+			} else {
+				output = [];
+				outputJson = "";
+			}
+			return;
+		}
+		file = null;
+		selectedSampleUrl = sample.src;
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleAssetInput<WidgetExampleOutputLabels> {
+		return isAssetInput(sample) && (!sample.output || isValidOutputLabels(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<div class="flex flex-wrap items-center {isDisabled ? 'pointer-events-none hidden opacity-50' : ''}">
+				<WidgetFileInput accept="audio/*" classNames="mt-1.5 mr-2" {onSelectFile} />
+				<span class="mr-2 mt-1.5">or</span>
+				<WidgetRecorder classNames="mt-1.5" {onRecordStart} onRecordStop={onSelectFile} onError={onRecordError} />
+			</div>
+			{#if fileUrl}
+				<WidgetAudioTrack classNames="mt-3" label={filename} src={fileUrl} />
+			{/if}
+			<WidgetSubmitBtn
+				classNames="mt-2"
+				isDisabled={isRecording || isDisabled}
+				{isLoading}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputChart classNames="pt-4" {output} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..5592fc5793b8f1e7ff1785b0a2250b8afecdd6db
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte
@@ -0,0 +1,191 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetAudioTrack from "../../shared/WidgetAudioTrack/WidgetAudioTrack.svelte";
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetRecorder from "../../shared/WidgetRecorder/WidgetRecorder.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	let isDisabled = false;
+
+	let computeTime = "";
+	let error: string = "";
+	let file: Blob | File | null = null;
+	let filename: string = "";
+	let fileUrl: string;
+	let isLoading = false;
+	let isRecording = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: AudioItem[] = [];
+	let outputJson: string;
+	let selectedSampleUrl = "";
+
+	interface AudioItem {
+		blob: string;
+		label: string;
+		src?: string;
+		"content-type": string;
+	}
+
+	function onRecordStart() {
+		file = null;
+		filename = "";
+		fileUrl = "";
+		isRecording = true;
+	}
+
+	function onRecordError(err: string) {
+		error = err;
+	}
+
+	function onSelectFile(updatedFile: Blob | File) {
+		isRecording = false;
+		selectedSampleUrl = "";
+		if (updatedFile.size !== 0) {
+			const date = new Date();
+			const time = date.toLocaleTimeString("en-US");
+			filename = "name" in updatedFile ? updatedFile.name : `Audio recorded from browser [${time}]`;
+			file = updatedFile;
+			fileUrl = URL.createObjectURL(file);
+		}
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		if (!file && !selectedSampleUrl) {
+			error = "You must select or record an audio file";
+			return;
+		}
+
+		if (!file && selectedSampleUrl) {
+			file = await getBlobFromUrl(selectedSampleUrl);
+		}
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is AudioItem[] {
+		return (
+			Array.isArray(arg) &&
+			arg.every(
+				(x) => typeof x.blob === "string" && typeof x.label === "string" && typeof x["content-type"] === "string"
+			)
+		);
+	}
+
+	function parseOutput(body: unknown): AudioItem[] {
+		if (isValidOutput(body)) {
+			for (const item of body) {
+				item.src = `data:${item["content-type"]};base64,${item.blob}`;
+			}
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<blob:string, label:string, content-type:string>");
+	}
+
+	function applyInputSample(sample: WidgetExampleAssetInput, opts: ExampleRunOpts = {}) {
+		filename = sample.example_title ?? "";
+		fileUrl = sample.src;
+		if (opts.isPreview) {
+			output = [];
+			outputJson = "";
+			return;
+		}
+		file = null;
+		selectedSampleUrl = sample.src;
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<div class="flex flex-wrap items-center {isDisabled ? 'pointer-events-none hidden opacity-50' : ''}">
+				<WidgetFileInput accept="audio/*" classNames="mt-1.5 mr-2" {onSelectFile} />
+				<span class="mr-2 mt-1.5">or</span>
+				<WidgetRecorder classNames="mt-1.5" {onRecordStart} onRecordStop={onSelectFile} onError={onRecordError} />
+			</div>
+			{#if fileUrl}
+				<WidgetAudioTrack classNames="mt-3" label={filename} src={fileUrl} />
+			{/if}
+			<WidgetSubmitBtn
+				classNames="mt-2"
+				isDisabled={isRecording || isDisabled}
+				{isLoading}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#each output as item}
+			<div class="mt-2 flex items-center">
+				<span class="mr-2">{item.label}:</span>
+				<WidgetAudioTrack classNames="" src={item.src} />
+			</div>
+		{/each}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..94687285a0eccde262346ba10fdef41c5100f76e
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte
@@ -0,0 +1,226 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExample,
+		WidgetExampleAssetInput,
+		WidgetExampleOutputText,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetAudioTrack from "../../shared/WidgetAudioTrack/WidgetAudioTrack.svelte";
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetOutputText from "../../shared/WidgetOutputText/WidgetOutputText.svelte";
+	import WidgetRecorder from "../../shared/WidgetRecorder/WidgetRecorder.svelte";
+	import WidgetRealtimeRecorder from "../../shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputText } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	let isDisabled = false;
+
+	let computeTime = "";
+	let error: string = "";
+	let file: Blob | File | null = null;
+	let filename: string = "";
+	let fileUrl: string;
+	let isLoading = false;
+	let isRecording = false;
+	let isRealtimeRecording = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson: string;
+	let selectedSampleUrl = "";
+	let warning: string = "";
+
+	function onRecordStart() {
+		file = null;
+		filename = "";
+		fileUrl = "";
+		isRecording = true;
+	}
+
+	function onRecordError(err: string) {
+		error = err;
+	}
+
+	function onSelectFile(updatedFile: Blob | File) {
+		isRecording = false;
+		selectedSampleUrl = "";
+
+		if (updatedFile.size !== 0) {
+			const date = new Date();
+			const time = date.toLocaleTimeString("en-US");
+			filename = "name" in updatedFile ? updatedFile.name : `Audio recorded from browser [${time}]`;
+			file = updatedFile;
+			fileUrl = URL.createObjectURL(file);
+		}
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts<WidgetExampleOutputText> = {}) {
+		if (exampleOutput) {
+			output = exampleOutput.text;
+			outputJson = "";
+			return;
+		}
+
+		if (!file && !selectedSampleUrl) {
+			error = "You must select or record an audio file";
+			output = "";
+			outputJson = "";
+			return;
+		}
+
+		if (!file && selectedSampleUrl) {
+			file = await getBlobFromUrl(selectedSampleUrl);
+		}
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No speech was detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): string {
+		if (isValidOutputText(body)) {
+			return body.text;
+		}
+		throw new TypeError("Invalid output: output must be of type <text:string>");
+	}
+
+	function applyInputSample(sample: WidgetExampleAssetInput<WidgetExampleOutputText>, opts: ExampleRunOpts = {}) {
+		filename = sample.example_title!;
+		fileUrl = sample.src;
+		if (opts.isPreview) {
+			if (isValidOutputText(sample.output)) {
+				output = sample.output.text;
+				outputJson = "";
+			} else {
+				output = "";
+				outputJson = "";
+			}
+			return;
+		}
+		file = null;
+		selectedSampleUrl = sample.src;
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function updateModelLoading(isLoading: boolean, estimatedTime: number = 0) {
+		modelLoading = { isLoading, estimatedTime };
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleAssetInput<WidgetExampleOutputText> {
+		return isAssetInput(sample) && (!sample.output || isValidOutputText(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<div class="flex flex-wrap items-center {isDisabled ? 'pointer-events-none hidden opacity-50' : ''}">
+				{#if !isRealtimeRecording}
+					<WidgetFileInput accept="audio/*" classNames="mt-1.5" {onSelectFile} />
+					<span class="mx-2 mt-1.5">or</span>
+					<WidgetRecorder classNames="mt-1.5" {onRecordStart} onRecordStop={onSelectFile} onError={onRecordError} />
+				{/if}
+				{#if model?.library_name === "transformers"}
+					{#if !isRealtimeRecording}
+						<span class="mx-2 mt-1.5">or</span>
+					{/if}
+					<WidgetRealtimeRecorder
+						classNames="mt-1.5"
+						{apiToken}
+						{model}
+						{updateModelLoading}
+						onRecordStart={() => (isRealtimeRecording = true)}
+						onRecordStop={() => (isRealtimeRecording = false)}
+						onError={onRecordError}
+					/>
+				{/if}
+			</div>
+			{#if !isRealtimeRecording}
+				{#if fileUrl}
+					<WidgetAudioTrack classNames="mt-3" label={filename} src={fileUrl} />
+				{/if}
+				<WidgetSubmitBtn
+					classNames="mt-2"
+					isDisabled={isRecording || isDisabled}
+					{isLoading}
+					onClick={() => {
+						getOutput();
+					}}
+				/>
+				{#if warning}
+					<div class="alert alert-warning mt-2">{warning}</div>
+				{/if}
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputText classNames="mt-4" {output} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..b3d90789dc53e319584e5db81427eefb0c5b8416
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte
@@ -0,0 +1,190 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetOutputConvo from "../../shared/WidgetOutputConvo/WidgetOutputConvo.svelte";
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	let isDisabled = false;
+
+	interface Conversation {
+		generated_responses: string[];
+		past_user_inputs: string[];
+	}
+	interface Response {
+		conversation: Conversation;
+		generated_text: string;
+	}
+
+	type Output = Array<{
+		input: string;
+		response: string;
+	}>;
+
+	let computeTime = "";
+	let conversation: Conversation = {
+		generated_responses: [],
+		past_user_inputs: [],
+	};
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Output = [];
+	let outputJson: string;
+	let text = "";
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			return;
+		}
+
+		if (shouldUpdateUrl && !conversation.past_user_inputs.length) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = {
+			inputs: {
+				generated_responses: conversation.generated_responses,
+				past_user_inputs: conversation.past_user_inputs,
+				text: trimmedText,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			outputJson = res.outputJson;
+			if (res.output) {
+				conversation = res.output.conversation;
+				output = res.output.output;
+			}
+			// Emptying input value
+			text = "";
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is Response {
+		return (
+			arg && Array.isArray(arg?.conversation?.generated_responses) && Array.isArray(arg?.conversation?.past_user_inputs)
+		);
+	}
+
+	function parseOutput(body: unknown): {
+		conversation: Conversation;
+		output: Output;
+	} {
+		if (isValidOutput(body)) {
+			const conversation = body.conversation;
+			const pastUserInputs = conversation.past_user_inputs;
+			const generatedResponses = conversation.generated_responses;
+			const output = pastUserInputs
+				.filter(
+					(x, i) =>
+						x !== null && x !== undefined && generatedResponses[i] !== null && generatedResponses[i] !== undefined
+				)
+				.map((x, i) => ({
+					input: x ?? "",
+					response: generatedResponses[i] ?? "",
+				}));
+			return { conversation, output };
+		}
+		throw new TypeError(
+			"Invalid output: output must be of type <conversation: <generated_responses:Array; past_user_inputs:Array>>"
+		);
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
+		text = sample.text;
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextInput}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<WidgetOutputConvo modelId={model.id} {output} />
+		<form>
+			<WidgetQuickInput
+				bind:value={text}
+				flatTop={true}
+				{isLoading}
+				{isDisabled}
+				onClickSubmitBtn={() => {
+					getOutput();
+				}}
+				submitButtonLabel="Send"
+			/>
+		</form>
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts
new file mode 100644
index 0000000000000000000000000000000000000000..551868a3874f1af0fe656766375ceaba8e82036a
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts
@@ -0,0 +1,43 @@
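+// Wraps a 1-D or 2-D array of numbers (e.g. an embedding) and precomputes its
+// min/max so each cell can be color-coded relative to the value range.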
+export class DataTable {
+	max: number;
+	min: number;
+	std: number;
+
+	constructor(public body: number[] | number[][]) {
+		const all = this.body.flat();
+		this.max = Math.max(...all);
+		this.min = Math.min(...all);
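+		// note: despite the name, `std` holds the value range (max - min), used by bg() below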
+		this.std = this.max - this.min;
+	}
+
+	get isArrLevel0(): boolean {
+		return isArrLevel0(this.body);
+	}
+
+	get oneDim(): number[] {
+		return this.body as number[];
+	}
+	get twoDim(): number[][] {
+		return this.body as number[][];
+	}
+
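+	// Bucket a value by where it falls in the range: the top ~30-40% gets green shades,
+	// the bottom ~30-40% gets red shades, and the middle stays uncolored.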
+	bg(value: number): string {
+		if (value > this.min + this.std * 0.7) {
+			return "bg-green-100 dark:bg-green-800";
+		}
+		if (value > this.min + this.std * 0.6) {
+			return "bg-green-50 dark:bg-green-900";
+		}
+		if (value < this.min + this.std * 0.3) {
+			return "bg-red-100 dark:bg-red-800";
+		}
+		if (value < this.min + this.std * 0.4) {
+			return "bg-red-50 dark:bg-red-900";
+		}
+		return "";
+	}
+}
+
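+// Type guard: true when the body is a flat number[] (a single vector) rather than number[][].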
+function isArrLevel0(x: number[] | number[][]): x is number[] {
+	return typeof x[0] === "number";
+}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d9f07a5268422fbcd2e5bd7d500148edcc8663a8
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte
@@ -0,0 +1,200 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	import { DataTable } from "./DataTable.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: DataTable | undefined;
+	let outputJson: string;
+	let text = "";
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = undefined;
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = { inputs: trimmedText };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = undefined;
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: any): DataTable {
+		if (Array.isArray(body)) {
+			if (body.length === 1) {
+				body = body[0];
+			}
+			return new DataTable(body);
+		}
+		throw new TypeError("Invalid output: output must be of type Array");
+	}
+
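+	// 1-D outputs are laid out column-major in a fixed 4-column table; the helpers
+	// below compute row counts and cell indices for that layout.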
+	const SINGLE_DIM_COLS = 4;
+
+	function range(n: number, b?: number): number[] {
+		return b
+			? Array(b - n)
+					.fill(0)
+					.map((_, i) => n + i)
+			: Array(n)
+					.fill(0)
+					.map((_, i) => i);
+	}
+	const numOfRows = (total_elems: number) => {
+		return Math.ceil(total_elems / SINGLE_DIM_COLS);
+	};
+
+	function applyInputSample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
+		text = sample.text;
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextInput}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetQuickInput
+				bind:value={text}
+				{isLoading}
+				{isDisabled}
+				onClickSubmitBtn={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output}
+			{#if output.isArrLevel0}
+				<div class="mt-3 h-96 overflow-auto">
+					<table class="w-full table-auto border text-right font-mono text-xs">
+						{#each range(numOfRows(output.oneDim.length)) as i}
+							<tr>
+								{#each range(SINGLE_DIM_COLS) as j}
+									{#if j * numOfRows(output.oneDim.length) + i < output.oneDim.length}
+										<td class="bg-gray-100 px-1 text-gray-400 dark:bg-gray-900">
+											{j * numOfRows(output.oneDim.length) + i}
+										</td>
+										<td class="px-1 py-0.5 {output.bg(output.oneDim[j * numOfRows(output.oneDim.length) + i])}">
+											{output.oneDim[j * numOfRows(output.oneDim.length) + i].toFixed(3)}
+										</td>
+									{/if}
+								{/each}
+							</tr>
+						{/each}
+					</table>
+				</div>
+			{:else}
+				<div class="mt-3 overflow-auto">
+					<table class="border text-right font-mono text-xs">
+						<tr>
+							<td class="bg-gray-100 dark:bg-gray-900" />
+							{#each range(output.twoDim[0].length) as j}
+								<td class="bg-gray-100 px-1 pt-1 text-gray-400 dark:bg-gray-900">{j}</td>
+							{/each}
+						</tr>
+						{#each output.twoDim as column, i}
+							<tr>
+								<td class="bg-gray-100 pl-4 pr-1 text-gray-400 dark:bg-gray-900">{i}</td>
+								{#each column as x}
+									<td class="px-1 py-1 {output.bg(x)}">
+										{x.toFixed(3)}
+									</td>
+								{/each}
+							</tr>
+						{/each}
+					</table>
+				</div>
+			{/if}
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..b8c00e06d9b58b523dcd547136363ecb1f32b22b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte
@@ -0,0 +1,178 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExampleTextInput,
+		WidgetExampleOutputLabels,
+		WidgetExample,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputLabels } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+	let text = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts<WidgetExampleOutputLabels> = {}) {
+		if (exampleOutput) {
+			output = exampleOutput;
+			outputJson = "";
+			return;
+		}
+
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = { inputs: trimmedText };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): Array<{ label: string; score: number }> {
+		if (Array.isArray(body)) {
+			// entries = body -> text-classificartion
+			// entries = body[0] -> summarization
+			const entries = (model.pipeline_tag === "text-classification" ? body[0] ?? [] : body) as Record<
+				string,
+				unknown
+			>[];
+			return entries
+				.filter((x) => !!x)
+				.map((x) => ({
+					// label = x.label -> text-classificartion
+					label: x.label ? String(x.label) : String(x.token_str),
+					score: x.score ? Number(x.score) : 0,
+				}));
+		}
+		throw new TypeError("Invalid output: output must be of type Array");
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput<WidgetExampleOutputLabels>, opts: ExampleRunOpts = {}) {
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			if (sample.output) {
+				output = sample.output;
+			} else {
+				output = [];
+			}
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleTextInput<WidgetExampleOutputLabels> {
+		return isTextInput(sample) && (!sample.output || isValidOutputLabels(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			{#if model.pipeline_tag === "fill-mask"}
+				<div class="mb-1.5 text-sm text-gray-500">
+					Mask token: <code>{model.mask_token}</code>
+				</div>
+			{/if}
+			<WidgetTextarea bind:value={text} bind:setValue={setTextAreaValue} {isDisabled} />
+			<WidgetSubmitBtn
+				classNames="mt-2"
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputChart classNames="pt-4" {output} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..33961aed520414ca9179ae5029d81968381f4700
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte
@@ -0,0 +1,176 @@
+<script lang="ts">
+	import type { WidgetProps, InferenceRunOpts, ExampleRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExample,
+		WidgetExampleAssetInput,
+		WidgetExampleOutputLabels,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputLabels } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let imgSrc = "";
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+	let warning: string = "";
+
+	function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		getOutput(file);
+	}
+
+	async function getOutput(
+		file: File | Blob,
+		{ withModelLoading = false, isOnLoadCall = false, exampleOutput = undefined }: InferenceRunOpts = {}
+	) {
+		if (!file) {
+			return;
+		}
+
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		output = [];
+		outputJson = "";
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No classes were detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput(file, { withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): Array<{ label: string; score: number }> {
+		if (isValidOutputLabels(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<label: string, score:number>");
+	}
+
+	async function applyInputSample(
+		sample: WidgetExampleAssetInput<WidgetExampleOutputLabels>,
+		opts: ExampleRunOpts = {}
+	) {
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			if (isValidOutputLabels(sample.output)) {
+				output = sample.output;
+				outputJson = "";
+			} else {
+				output = [];
+				outputJson = "";
+			}
+			return;
+		}
+		const blob = await getBlobFromUrl(imgSrc);
+		const exampleOutput = sample.output;
+		getOutput(blob, { ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleAssetInput<WidgetExampleOutputLabels> {
+		return isAssetInput(sample) && (!sample.output || isValidOutputLabels(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetDropzone
+				classNames="no-hover:hidden"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<img src={imgSrc} class="pointer-events-none mx-auto max-h-44 shadow" alt="" />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/table through CSS breakpoints -->
+			{#if imgSrc}
+				<div class="mb-2 flex justify-center bg-gray-50 dark:bg-gray-900 md:hidden">
+					<img src={imgSrc} class="pointer-events-none max-h-44" alt="" />
+				</div>
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputChart classNames="pt-4" {output} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..082b1062efac8aee2e10efe2d9bb7546c0438b74
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte
@@ -0,0 +1,75 @@
+<script lang="ts">
+	import type { ImageSegment } from "$lib/components/InferenceWidget/shared/types.js";
+
+	import { afterUpdate } from "svelte";
+
+	export let classNames = "";
+	export let highlightIndex: number;
+	export let imgSrc = "";
+	export let mousemove: (e: Event, canvasW: number, canvasH: number) => void = () => {};
+	export let mouseout: () => void = () => {};
+	export let output: ImageSegment[] = [];
+
+	let containerEl: HTMLElement;
+	let canvas: HTMLCanvasElement;
+	let imgEl: HTMLImageElement;
+	let width = 0;
+	let height = 0;
+	let startTs: DOMHighResTimeStamp;
+
+	const animDuration = 200;
+
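+	// Redraws the image plus the highlighted masks, fading them in over `animDuration` ms
+	// by ramping the canvas globalAlpha across successive animation frames.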
+	function draw() {
+		width = containerEl.clientWidth;
+		height = containerEl.clientHeight;
+		startTs = performance.now();
+		drawHelper();
+	}
+
+	function drawHelper() {
+		const maskToDraw = output.reduce((arr: any[], o, i) => {
+			const mask = o?.bitmap;
+			if (mask && (i === highlightIndex || highlightIndex === -1)) {
+				arr.push(mask);
+			}
+			return arr;
+		}, []);
+
+		const ctx = canvas?.getContext("2d");
+
+		if (ctx) {
+			const duration = performance.now() - startTs;
+			ctx.globalAlpha = Math.min(duration, animDuration) / animDuration;
+			ctx.drawImage(imgEl, 0, 0, width, height);
+			for (const mask of maskToDraw) {
+				ctx.drawImage(mask, 0, 0, width, height);
+			}
+			if (duration < animDuration) {
+				// when using canvas, prefer to use requestAnimationFrame over setTimeout & setInterval
+				// https://developer.mozilla.org/en-US/docs/Web/API/window/requestAnimationFrame
+				window.requestAnimationFrame(drawHelper);
+			}
+		}
+	}
+
+	afterUpdate(draw);
+</script>
+
+<svelte:window on:resize={draw} />
+
+<div class="relative top-0 left-0 inline-flex {classNames}" bind:this={containerEl}>
+	<div class="flex max-w-sm justify-center">
+		<img alt="" class="relative top-0 left-0 object-contain" src={imgSrc} bind:this={imgEl} />
+	</div>
+	{#if output.length}
+		<!-- svelte-ignore a11y-mouse-events-have-key-events -->
+		<canvas
+			class="absolute top-0 left-0"
+			{width}
+			{height}
+			bind:this={canvas}
+			on:mousemove={(e) => mousemove(e, width, height)}
+			on:mouseout={mouseout}
+		/>
+	{/if}
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..beff590a69dd47eb91df369e8cd75b34ed441300
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte
@@ -0,0 +1,286 @@
+<script lang="ts">
+	import type {
+		WidgetProps,
+		ImageSegment,
+		ExampleRunOpts,
+		InferenceRunOpts,
+	} from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import { onMount } from "svelte";
+
+	import { COLORS } from "$lib/components/InferenceWidget/shared/consts.js";
+	import { clamp, mod, hexToRgb } from "$lib/utils/ViewUtils.js";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	import Canvas from "./Canvas.svelte";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
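+	// Precompute an RGB lookup per palette color; masks are composited at 60% opacity.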
+	const maskOpacity = Math.floor(255 * 0.6);
+	const colorToRgb = Object.fromEntries(
+		COLORS.map((color) => {
+			const [r, g, b]: number[] = hexToRgb(color.hex);
+			return [color.color, { r, g, b }];
+		})
+	);
+
+	let computeTime = "";
+	let error: string = "";
+	let highlightIndex = -1;
+	let isLoading = false;
+	let imgSrc = "";
+	let imgEl: HTMLImageElement;
+	let imgW = 0;
+	let imgH = 0;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: ImageSegment[] = [];
+	let outputJson: string;
+	let warning: string = "";
+
+	function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		getOutput(file);
+	}
+
+	async function getOutput(
+		file: File | Blob,
+		{ withModelLoading = false, isOnLoadCall = false, exampleOutput = undefined }: InferenceRunOpts = {}
+	) {
+		if (!file) {
+			return;
+		}
+
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		output = [];
+		outputJson = "";
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			const output_ = res.output;
+			if (output_.length === 0) {
+				warning = "No object was detected";
+			} else {
+				imgW = imgEl.naturalWidth;
+				imgH = imgEl.naturalHeight;
+				isLoading = true;
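+				// assign each segment a palette color, then build its recolored bitmap off-screen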
+				output = (
+					await Promise.all(output_.map((o, idx) => addOutputColor(o, idx)).map((o) => addOutputCanvasData(o)))
+				).filter((o) => o !== undefined) as ImageSegment[];
+				isLoading = false;
+			}
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput(file, { withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is ImageSegment[] {
+		return (
+			Array.isArray(arg) &&
+			arg.every((x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.mask === "string")
+		);
+	}
+	function parseOutput(body: unknown): ImageSegment[] {
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<{label:string; score:number; mask: string}>");
+	}
+
+	function mouseout() {
+		highlightIndex = -1;
+	}
+
+	function mouseover(index: number) {
+		highlightIndex = index;
+	}
+
+	function mousemove(e: any, canvasW: number, canvasH: number) {
+		let { layerX, layerY } = e;
+		layerX = clamp(layerX, 0, canvasW);
+		layerY = clamp(layerY, 0, canvasH);
+		// map canvas coordinates to the image's natural resolution
+		const x = Math.floor((layerX / canvasW) * imgW);
+		const y = Math.floor((layerY / canvasH) * imgH);
+		highlightIndex = -1;
+		// RGBA pixel data is row-major, 4 bytes per pixel
+		const index = (imgW * y + x) * 4;
+		for (const [i, o] of output.entries()) {
+			const pixel = o?.imgData?.data[index];
+			if (pixel && pixel > 0) {
+				highlightIndex = i;
+			}
+		}
+	}
+
+	function addOutputColor(imgSegment: ImageSegment, idx: number) {
+		const hash = mod(idx, COLORS.length);
+		const { color } = COLORS[hash];
+		return { ...imgSegment, color };
+	}
+
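+	// Decodes the base64 PNG mask, recolors mask-foreground pixels (red channel 255)
+	// with the segment's palette color at 60% opacity, and attaches the pixel data
+	// and a drawable bitmap to the segment.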
+	async function addOutputCanvasData(imgSegment: ImageSegment): Promise<ImageSegment | undefined> {
+		const { mask, color } = imgSegment;
+
+		const maskImg = new Image();
+		maskImg.src = `data:image/png;base64, ${mask}`;
+		// wait for the mask image to decode before reading its pixels
+		await new Promise((resolve) => {
+			maskImg.onload = () => resolve(maskImg);
+		});
+		const imgData = getImageData(maskImg);
+		if (imgData && color) {
+			const { r, g, b } = colorToRgb[color];
+			const maskColored = [r, g, b, maskOpacity];
+			const background = Array(4).fill(0);
+
+			for (let i = 0; i < imgData.data.length; i += 4) {
+				const [r, g, b, a] = imgData.data[i] === 255 ? maskColored : background;
+				imgData.data[i] = r;
+				imgData.data[i + 1] = g;
+				imgData.data[i + 2] = b;
+				imgData.data[i + 3] = a;
+			}
+
+			const bitmap = await createImageBitmap(imgData);
+			return { ...imgSegment, imgData, bitmap };
+		}
+	}
+
+	function getImageData(maskImg: CanvasImageSource): ImageData | undefined {
+		const tmpCanvas = document.createElement("canvas");
+		tmpCanvas.width = imgW;
+		tmpCanvas.height = imgH;
+		const tmpCtx = tmpCanvas.getContext("2d");
+		tmpCtx?.drawImage(maskImg, 0, 0, imgW, imgH);
+		const segmentData = tmpCtx?.getImageData(0, 0, imgW, imgH);
+		return segmentData;
+	}
+
+	// original: https://gist.github.com/MonsieurV/fb640c29084c171b4444184858a91bc7
+	function polyfillCreateImageBitmap() {
+		(window as any).createImageBitmap = async function (data: ImageData): Promise<ImageBitmap> {
+			return new Promise((resolve, _) => {
+				const canvas = document.createElement("canvas");
+				const ctx = canvas.getContext("2d");
+				canvas.width = data.width;
+				canvas.height = data.height;
+				ctx?.putImageData(data, 0, 0);
+				const dataURL = canvas.toDataURL();
+				const img = document.createElement("img");
+				img.addEventListener("load", () => {
+					resolve(img as any as ImageBitmap);
+				});
+				img.src = dataURL;
+			});
+		};
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetInput, opts: ExampleRunOpts = {}) {
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			output = [];
+			outputJson = "";
+			return;
+		}
+		const blob = await getBlobFromUrl(imgSrc);
+		const exampleOutput = sample.output;
+		getOutput(blob, { ...opts.inferenceOpts, exampleOutput });
+	}
+
+	onMount(() => {
+		if (typeof createImageBitmap === "undefined") {
+			polyfillCreateImageBitmap();
+		}
+	});
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<Canvas {imgSrc} {highlightIndex} {mousemove} {mouseout} {output} />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/table through CSS breakpoints -->
+			{#if imgSrc}
+				<Canvas classNames="mr-2 md:hidden" {imgSrc} {highlightIndex} {mousemove} {mouseout} {output} />
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+			<img alt="" bind:this={imgEl} class="hidden" src={imgSrc} />
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputChart classNames="pt-4" {output} {highlightIndex} {mouseover} {mouseout} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..2d1ecbf36d3772b87da48cd4dee4ff0f42d67b06
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte
@@ -0,0 +1,202 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetAndPromptInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetTextInput from "../../shared/WidgetTextInput/WidgetTextInput.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { addInferenceParameters, callInferenceApi } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetAndPromptInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson = "";
+	let prompt = "";
+	let imgSrc = "";
+	let imageBase64 = "";
+
+	async function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		await updateImageBase64(file);
+	}
+
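+	// Reads the file as a data URL and keeps only the base64 payload, since the
+	// request body carries the raw base64 string without the `data:*;base64,` prefix.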
+	function updateImageBase64(file: File | Blob): Promise<void> {
+		return new Promise((resolve, reject) => {
+			const fileReader: FileReader = new FileReader();
+			fileReader.onload = async () => {
+				try {
+					const imageBase64WithPrefix: string = fileReader.result as string;
+					imageBase64 = imageBase64WithPrefix.split(",")[1]; // remove prefix
+					isLoading = false;
+					resolve();
+				} catch (err) {
+					reject(err);
+				}
+			};
+			fileReader.onerror = (e) => reject(e);
+			isLoading = true;
+			fileReader.readAsDataURL(file);
+		});
+	}
+
+	function parseOutput(body: unknown): string {
+		if (body instanceof Blob) {
+			return URL.createObjectURL(body);
+		}
+		throw new TypeError("Invalid output: output must be of type Blob");
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetAndPromptInput, opts: ExampleRunOpts = {}) {
+		prompt = sample.prompt;
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			return;
+		}
+		const res = await fetch(imgSrc);
+		const blob = await res.blob();
+		await updateImageBase64(blob);
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedPrompt = prompt.trim();
+
+		if (!imageBase64) {
+			error = "You need to upload an image";
+			output = "";
+			outputJson = "";
+			return;
+		}
+
+		const requestBody = {
+			inputs: imageBase64,
+			parameters: {
+				prompt: trimmedPrompt,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetAndPromptInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<img src={imgSrc} class="pointer-events-none mx-auto max-h-44 shadow" alt="" />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/table through CSS breakpoints -->
+			{#if imgSrc}
+				<div class="mb-2 flex justify-center bg-gray-50 dark:bg-gray-900 md:hidden">
+					<img src={imgSrc} class="pointer-events-none max-h-44" alt="" />
+				</div>
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			<WidgetTextInput
+				bind:value={prompt}
+				{isDisabled}
+				label="(Optional) Text-guidance if the model has support for it"
+				placeholder="Your prompt here..."
+			/>
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<div class="mt-4 flex justify-center bg-gray-50 dark:bg-gray-925">
+				<img class="max-w-sm object-contain" src={output} alt="" />
+			</div>
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..0b7acee47339414c9b08fc0e074eb0baa08dad3d
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte
@@ -0,0 +1,161 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetOutputText from "../../shared/WidgetOutputText/WidgetOutputText.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let imgSrc = "";
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson: string;
+	let warning: string = "";
+
+	function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		getOutput(file);
+	}
+
+	async function getOutput(
+		file: File | Blob,
+		{ withModelLoading = false, isOnLoadCall = false, exampleOutput = undefined }: InferenceRunOpts = {}
+	) {
+		if (!file) {
+			return;
+		}
+
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		output = "";
+		outputJson = "";
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No text was generated";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput(file, { withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): string {
+		if (Array.isArray(body) && body.length && typeof body[0]?.generated_text === "string") {
+			return body[0].generated_text;
+		}
+		throw new TypeError("Invalid output: output must be a non-empty Array with a generated_text string");
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetInput, opts: ExampleRunOpts = {}) {
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			output = "";
+			outputJson = "";
+			return;
+		}
+		const blob = await getBlobFromUrl(imgSrc);
+		getOutput(blob);
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<img src={imgSrc} class="pointer-events-none mx-auto max-h-44 shadow" alt="" />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/table through CSS breakpoints -->
+			{#if imgSrc}
+				<div class="mb-2 flex justify-center bg-gray-50 dark:bg-gray-900 md:hidden">
+					<img src={imgSrc} class="pointer-events-none max-h-44" alt="" />
+				</div>
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if model?.pipeline_tag !== "text-generation"}
+			<WidgetOutputText classNames="mt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..a14e3ba2863da720832e21ca28f5a3bdfb85ba27
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte
@@ -0,0 +1,197 @@
+<script lang="ts">
+	import type {
+		WidgetProps,
+		DetectedObject,
+		ExampleRunOpts,
+		InferenceRunOpts,
+	} from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import { mod } from "$lib/utils/ViewUtils.js";
+	import { COLORS } from "$lib/components/InferenceWidget/shared/consts.js";
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { callInferenceApi, getBlobFromUrl } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	import BoundingBoxes from "./SvgBoundingBoxes.svelte";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let imgSrc = "";
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: DetectedObject[] = [];
+	let outputJson: string;
+	let highlightIndex = -1;
+	let warning: string = "";
+
+	function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		getOutput(file);
+	}
+
+	async function getOutput(
+		file: File | Blob,
+		{ withModelLoading = false, isOnLoadCall = false, exampleOutput = undefined }: InferenceRunOpts = {}
+	) {
+		if (!file) {
+			return;
+		}
+
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		output = [];
+		outputJson = "";
+
+		const requestBody = { file };
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			output = output.map((o, idx) => addOutputColor(o, idx));
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No object was detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput(file, { withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function addOutputColor(detObj: DetectedObject, idx: number) {
+		const hash = mod(idx, COLORS.length);
+		const { color } = COLORS[hash];
+		return { ...detObj, color };
+	}
+
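+	// Runtime shape check on the API response: every entry needs a label, a score,
+	// and a complete bounding box.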
+	function isValidOutput(arg: any): arg is DetectedObject[] {
+		return (
+			Array.isArray(arg) &&
+			arg.every(
+				(x) =>
+					typeof x.label === "string" &&
+					typeof x.score === "number" &&
+					typeof x.box.xmin === "number" &&
+					typeof x.box.ymin === "number" &&
+					typeof x.box.xmax === "number" &&
+					typeof x.box.ymax === "number"
+			)
+		);
+	}
+
+	function parseOutput(body: unknown): DetectedObject[] {
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError(
+			"Invalid output: output must be of type Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>"
+		);
+	}
+
+	function mouseout() {
+		highlightIndex = -1;
+	}
+
+	function mouseover(index: number) {
+		highlightIndex = index;
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetInput, opts: ExampleRunOpts = {}) {
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			output = [];
+			outputJson = "";
+			return;
+		}
+		const blob = await getBlobFromUrl(imgSrc);
+		const exampleOutput = sample.output;
+		getOutput(blob, { ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<BoundingBoxes {imgSrc} {mouseover} {mouseout} {output} {highlightIndex} />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/table through CSS breakpoints -->
+			{#if imgSrc}
+				<BoundingBoxes classNames="mb-2 md:hidden" {imgSrc} {mouseover} {mouseout} {output} {highlightIndex} />
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputChart classNames="pt-4" {output} {highlightIndex} {mouseover} {mouseout} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..90446b606298b2d469c84d48e7f61d52da7d695f
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte
@@ -0,0 +1,93 @@
+<!-- 
+for Tailwind:
+text-red-400
+text-green-400
+text-yellow-400
+text-blue-400
+text-orange-400
+text-purple-400
+text-cyan-400
+text-lime-400
+ -->
+<script lang="ts">
+	import type { DetectedObject } from "$lib/components/InferenceWidget/shared/types.js";
+
+	import { afterUpdate } from "svelte";
+
+	interface Rect {
+		x: number;
+		y: number;
+		width: number;
+		height: number;
+	}
+
+	let containerEl: HTMLElement;
+	let imgEl: HTMLImageElement;
+	let wrapperHeight = 0;
+	let wrapperWidth = 0;
+	let boxes: Array<{
+		color?: string;
+		index: number;
+		rect: Rect;
+	}> = [];
+
+	export let classNames = "";
+	export let highlightIndex = -1;
+	export let imgSrc: string;
+	export let output: DetectedObject[] = [];
+	export let mouseover: (index: number) => void = () => {};
+	export let mouseout: () => void = () => {};
+
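+	// Scale the model's pixel-space boxes to the rendered image size, and draw larger
+	// boxes first so smaller ones end up on top and stay hoverable.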
+	$: {
+		if (imgEl?.naturalWidth && imgEl?.naturalHeight) {
+			const widthScale = wrapperWidth / imgEl.naturalWidth;
+			const heightScale = wrapperHeight / imgEl.naturalHeight;
+			boxes = output
+				.map((val, index) => ({ ...val, index }))
+				.map(({ box, color, index }) => {
+					const rect = {
+						x: box.xmin * widthScale,
+						y: box.ymin * heightScale,
+						width: (box.xmax - box.xmin) * widthScale,
+						height: (box.ymax - box.ymin) * heightScale,
+					};
+					return { rect, color, index };
+				})
+				.sort((a, b) => getArea(b.rect) - getArea(a.rect));
+		}
+	}
+
+	function getArea(rect: Rect): number {
+		return rect.width * rect.height;
+	}
+
+	afterUpdate(() => {
+		wrapperWidth = containerEl.clientWidth;
+		wrapperHeight = containerEl.clientHeight;
+	});
+</script>
+
+<div class="relative top-0 left-0 inline-flex {classNames}" bind:this={containerEl}>
+	<div class="flex max-w-sm justify-center">
+		<img alt="" class="relative top-0 left-0 object-contain" src={imgSrc} bind:this={imgEl} />
+	</div>
+
+	<svg
+		class="absolute top-0 left-0"
+		viewBox={`0 0 ${wrapperWidth} ${wrapperHeight}`}
+		xmlns="http://www.w3.org/2000/svg"
+	>
+		{#each boxes as { rect, color, index }}
+			<!-- svelte-ignore a11y-mouse-events-have-key-events -->
+			<rect
+				class="transition duration-200 ease-in-out text-{color}-400 fill-current stroke-current"
+				fill-opacity={highlightIndex === -1 || highlightIndex === index ? "0.1" : "0.0"}
+				opacity={highlightIndex === -1 || highlightIndex === index ? "1" : "0.0"}
+				{...rect}
+				stroke-width="2"
+				on:mouseover={() => mouseover(index)}
+				on:mouseout={mouseout}
+			/>
+		{/each}
+	</svg>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..53479ee8f75b0aa2005cc376dacd3550cfd42e86
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte
@@ -0,0 +1,177 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExample,
+		WidgetExampleOutputAnswerScore,
+		WidgetExampleTextAndContextInput,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputAnswerScore } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isTextAndContextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let context = "";
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: { answer: string; score: number } | null = null;
+	let outputJson: string;
+	let question = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedQuestion = question.trim();
+		const trimmedContext = context.trim();
+
+		if (!trimmedQuestion) {
+			error = "You need to input a question";
+			output = null;
+			outputJson = "";
+			return;
+		}
+
+		if (!trimmedContext) {
+			error = "You need to input some context";
+			output = null;
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ context: trimmedContext, text: trimmedQuestion });
+		}
+
+		const requestBody = {
+			inputs: { question: trimmedQuestion, context: trimmedContext },
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = null;
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): { answer: string; score: number } {
+		if (isValidOutputAnswerScore(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type <answer:string; score:number>");
+	}
+
+	function applyInputSample(
+		sample: WidgetExampleTextAndContextInput<WidgetExampleOutputAnswerScore>,
+		opts: ExampleRunOpts = {}
+	) {
+		question = sample.text;
+		setTextAreaValue(sample.context);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function validateExample(
+		sample: WidgetExample
+	): sample is WidgetExampleTextAndContextInput<WidgetExampleOutputAnswerScore> {
+		return isTextAndContextInput(sample) && (!sample.output || isValidOutputAnswerScore(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+	exampleQueryParams={["context", "text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetQuickInput
+				bind:value={question}
+				{isLoading}
+				{isDisabled}
+				onClickSubmitBtn={() => {
+					getOutput();
+				}}
+			/>
+			<WidgetTextarea
+				bind:value={context}
+				bind:setValue={setTextAreaValue}
+				{isDisabled}
+				placeholder="Please input some context..."
+				label="Context"
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output}
+			<div class="alert alert-success mt-4 flex items-baseline">
+				<span>{output.answer}</span>
+				<span class="ml-auto font-mono text-xs">{output.score.toFixed(3)}</span>
+			</div>
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3e6714ab2f23a522bf898e27dbfa1a010b4171b3
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte
@@ -0,0 +1,62 @@
+<script lang="ts">
+	import type { WidgetProps } from "$lib/components/InferenceWidget/shared/types.js";
+
+	import { onMount } from "svelte";
+
+	import IconSpin from "$lib/components/Icons/IconSpin.svelte";
+	import IconCross from "$lib/components/Icons/IconCross.svelte";
+	import WidgetHeader from "../../shared/WidgetHeader/WidgetHeader.svelte";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let model: WidgetProps["model"];
+
+	enum Replay {
+		Loading,
+		Available,
+		Unavailable,
+	}
+
+	let replay = Replay.Loading;
+	let replaySrc = "";
+
+	async function doesReplayExist(): Promise<boolean> {
+		// check if the repository has a `replay.mp4` file;
+		// a HEAD request probes for the file without downloading it
+		replaySrc = `/${model.id}/resolve/main/replay.mp4`;
+		try {
+			const res = await fetch(replaySrc, { method: "HEAD" });
+			return res.status !== 404;
+		} catch {
+			return false;
+		}
+	}
+
+	onMount(async () => {
+		replay = (await doesReplayExist()) ? Replay.Available : Replay.Unavailable;
+	});
+</script>
+
+<!--
+	WidgetWrapper.svelte is not used here because this pipeline widget (reinforcement-learning)
+	does NOT use the Inference API (unlike the other pipeline widgets)
+-->
+<div class="flex w-full max-w-full flex-col">
+	<WidgetHeader title="Video Preview" pipeline="reinforcement-learning" />
+	<div class="w-full overflow-hidden rounded-lg">
+		{#if replay === Replay.Available}
+			<!-- svelte-ignore a11y-media-has-caption -->
+			<video class="w-full" src={replaySrc} controls autoplay muted loop />
+		{:else if replay === Replay.Unavailable}
+			<div class="flex items-center justify-center rounded-lg border py-6 text-sm text-gray-500">
+				{#if !!apiToken}
+					Add<span class="font-mono text-xs">&nbsp;./replay.mp4&nbsp;</span>file to display a preview
+				{:else}
+					<IconCross />
+					Preview not found
+				{/if}
+			</div>
+		{:else}
+			<div class="flex items-center justify-center rounded-lg border py-6 text-sm text-gray-500">
+				<IconSpin classNames="mr-1.5 text-purple-400 dark:text-purple-200 animate-spin mt-0.5" />
+				<span class="text-gray-500">loading</span>
+			</div>
+		{/if}
+	</div>
+</div>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..07308d7a847abebbf929b972b60a450abc67e2b9
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte
@@ -0,0 +1,190 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleSentenceSimilarityInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetAddSentenceBtn from "../../shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte";
+	import WidgetTextInput from "../../shared/WidgetTextInput/WidgetTextInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { addInferenceParameters, callInferenceApi } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isSentenceSimilarityInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let sourceSentence = "";
+	let comparisonSentences: Array<string> = [];
+	let nComparisonSentences = 2;
+	const maxComparisonSentences = 5;
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedSourceSentence = sourceSentence.trim();
+		if (!trimmedSourceSentence) {
+			error = "You need to input some text";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		const trimmedComparisonSentences: Array<string> = [];
+		let emptySentence = false;
+		for (const sentence of comparisonSentences) {
+			const trimmedSentence = sentence.trim();
+			if (!trimmedSentence) {
+				emptySentence = true;
+			}
+			trimmedComparisonSentences.push(trimmedSentence);
+		}
+		if (emptySentence) {
+			error = "You need to specify the comparison sentences";
+			output = [];
+			outputJson = "";
+			return;
+		}
+		if (comparisonSentences.length < 2) {
+			error = "You need to specify at least 2 comparison sentences";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
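+		// the sentence-similarity API takes one source sentence plus the sentences
+		// to compare against, and returns one similarity score per comparison sentence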
+		const requestBody = {
+			inputs: {
+				source_sentence: trimmedSourceSentence,
+				sentences: trimmedComparisonSentences,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			for (let i = 0; i < res.output.length; i++) {
+				output.push({
+					label: trimmedComparisonSentences[i],
+					score: res.output[i],
+				});
+			}
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): Array<number> {
+		if (Array.isArray(body) && body.every((x) => typeof x === "number")) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<number>");
+	}
+
+	function applyInputSample(sample: WidgetExampleSentenceSimilarityInput, opts: ExampleRunOpts = {}) {
+		sourceSentence = sample.source_sentence;
+		comparisonSentences = sample.sentences;
+		nComparisonSentences = comparisonSentences.length;
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isSentenceSimilarityInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="flex flex-col space-y-2">
+			<WidgetTextInput
+				bind:value={sourceSentence}
+				{isDisabled}
+				label="Source Sentence"
+				placeholder={isDisabled ? "" : "Your sentence here..."}
+			/>
+			<WidgetTextInput
+				bind:value={comparisonSentences[0]}
+				{isDisabled}
+				label="Sentences to compare to"
+				placeholder={isDisabled ? "" : "Your sentence here..."}
+			/>
+			{#each Array(nComparisonSentences - 1) as _, idx}
+				<WidgetTextInput bind:value={comparisonSentences[idx + 1]} {isDisabled} placeholder="Your sentence here..." />
+			{/each}
+			<WidgetAddSentenceBtn
+				isDisabled={nComparisonSentences === maxComparisonSentences || isDisabled}
+				onClick={() => {
+					nComparisonSentences++;
+				}}
+			/>
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<WidgetOutputChart classNames="pt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..e2e4680d148156e1a75e2ad5fd8f0f9d76f960e0
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte
@@ -0,0 +1,141 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetOutputText from "../../shared/WidgetOutputText/WidgetOutputText.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson: string;
+	let text = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedValue = text.trim();
+
+		if (!trimmedValue) {
+			error = "You need to input some text";
+			output = "";
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedValue });
+		}
+
+		const requestBody = { inputs: trimmedValue };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): string {
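+		// the API returns an array of candidates, e.g. [{ summary_text: "..." }]; show the first one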
+		if (Array.isArray(body) && body.length) {
+			return body[0]?.["summary_text"] ?? "";
+		}
+		throw new TypeError("Invalid output: output must be of type Array & non-empty");
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextInput}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetTextarea bind:value={text} bind:setValue={setTextAreaValue} {isDisabled} />
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputText classNames="mt-4" {output} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d252b25c843d1bc438221bc10f6890d244b89c16
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte
@@ -0,0 +1,201 @@
+<script lang="ts">
+	import type {
+		WidgetProps,
+		HighlightCoordinates,
+		ExampleRunOpts,
+		InferenceRunOpts,
+	} from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextAndTableInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetOutputTableQA from "../../shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte";
+	import WidgetTableInput from "../../shared/WidgetTableInput/WidgetTableInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		convertDataToTable,
+		convertTableToData,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextAndTableInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	interface Output {
+		aggregator?: string;
+		answer: string;
+		coordinates: [number, number][];
+		cells: number[];
+	}
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Output | null = null;
+	let outputJson: string;
+	let table: (string | number)[][] = [[]];
+	let query = "";
+	let isAnswerOnlyOutput = false;
+
+	let highlighted: HighlightCoordinates = {};
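+	// Derive highlight classes from the answer coordinates: one entry per matched
+	// row (`y`) and one per matched cell (`y-x`)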
+	$: highlighted = Object.fromEntries(
+		output?.coordinates?.flatMap(([yCor, xCor]) => [
+			[`${yCor}`, "bg-green-50 dark:bg-green-900"],
+			[`${yCor}-${xCor}`, "bg-green-100 border-green-100 dark:bg-green-700 dark:border-green-700"],
+		]) ?? []
+	);
+
+	function onChangeTable(updatedTable: (string | number)[][]) {
+		table = updatedTable;
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedQuery = query.trim();
+
+		if (!trimmedQuery) {
+			error = "You need to input a query";
+			output = null;
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({
+				text: trimmedQuery,
+				table: JSON.stringify(convertTableToData(table)),
+			});
+		}
+
+		const requestBody = {
+			inputs: {
+				query: trimmedQuery,
+				table: convertTableToData(table),
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = null;
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is Output {
+		return (
+			arg &&
+			typeof arg === "object" &&
+			typeof arg["answer"] === "string" &&
+			(arg["aggregator"] === undefined ? true : typeof arg["aggregator"] === "string") &&
+			(Array.isArray(arg["coordinates"]) || isAnswerOnlyOutput) &&
+			(Array.isArray(arg["cells"]) || isAnswerOnlyOutput)
+		);
+	}
+
+	function parseOutput(body: any): Output {
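+		// some table QA models return only an answer string, without `coordinates`/`cells`;
+		// remember that so validation and the output component can adapt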
+		if (body["coordinates"] === undefined && body["cells"] === undefined) {
+			isAnswerOnlyOutput = true;
+		}
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError(
+			"Invalid output: output must be of type <answer:string; coordinates?:Array; cells?:Array; aggregator?:string>"
+		);
+	}
+
+	function applyInputSample(sample: WidgetExampleTextAndTableInput, opts: ExampleRunOpts = {}) {
+		query = sample.text;
+		table = convertDataToTable(sample.table);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextAndTableInput}
+	exampleQueryParams={["text", "table"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetQuickInput
+				bind:value={query}
+				{isLoading}
+				{isDisabled}
+				onClickSubmitBtn={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+		<div class="mt-4">
+			{#if output}
+				<WidgetOutputTableQA {output} {isAnswerOnlyOutput} />
+			{/if}
+			{#if table.length > 1 || table[0].length > 1}
+				<WidgetTableInput {highlighted} onChange={onChangeTable} {table} {isDisabled} />
+			{/if}
+		</div>
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..3c0a12f478d3b0b0197bd8bfc943d88a1349f38e
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte
@@ -0,0 +1,233 @@
+<script lang="ts">
+	import type {
+		WidgetProps,
+		HighlightCoordinates,
+		InferenceRunOpts,
+		ExampleRunOpts,
+	} from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExampleStructuredDataInput,
+		WidgetExampleOutputLabels,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetTableInput from "../../shared/WidgetTableInput/WidgetTableInput.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import { mod } from "$lib/utils/ViewUtils.js";
+	import {
+		addInferenceParameters,
+		convertDataToTable,
+		convertTableToData,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isStructuredDataInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	const widgetData = model?.widgetData?.[0] as WidgetExampleStructuredDataInput<WidgetExampleOutputLabels> | undefined;
+	const columns: string[] = Object.keys(widgetData?.structured_data ?? {});
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: (string | number)[] | null = [];
+	let outputJson: string;
+	let table: (string | number)[][] = [columns];
+
+	let highlighted: HighlightCoordinates = {};
+	let highlightErrorKey = "";
+	let scrollTableToRight: () => Promise<void>;
+	let tableWithOutput: (string | number)[][];
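+	// Keep the rendered table in sync with the model output: append predictions as
+	// a "Prediction" column on success, or highlight the offending cell on error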
+	$: {
+		const structuredData = convertTableToData(table);
+		if (output?.length) {
+			structuredData.Prediction = output;
+			const lastColIndex = Object.keys(structuredData).length - 1;
+			highlighted = highlightOutput(output, lastColIndex);
+			scrollTableToRight();
+		} else {
+			delete structuredData.Prediction;
+			highlighted = {};
+			if (highlightErrorKey) {
+				highlighted[highlightErrorKey] = "bg-red-100 border-red-100 dark:bg-red-800 dark:border-red-800";
+				highlightErrorKey = "";
+			}
+		}
+		tableWithOutput = convertDataToTable(structuredData);
+	}
+
+	const COLORS = ["blue", "green", "yellow", "purple", "red"] as const;
+
+	function onChangeTable(updatedTable: (string | number)[][]) {
+		table = updatedTable;
+		output = [];
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		for (const [i, row] of table.entries()) {
+			for (const [j, cell] of row.entries()) {
+				if (!String(cell)) {
+					error = `Missing value at row=${i} & column='${columns[j]}'`;
+					// highlight keys index data rows only (row 0 of `table` is the header row)
+					highlightErrorKey = `${i - 1}-${j}`;
+					output = null;
+					outputJson = "";
+					return;
+				}
+				// tabular-data backend expects value `NaN` for "null value"
+				if (/(null|nan)/i.test(String(cell))) {
+					table[i][j] = "NaN";
+				}
+			}
+		}
+
+		// strip prediction column
+		// eslint-disable-next-line @typescript-eslint/naming-convention
+		const { Prediction, ...tableWithoutOutput } = convertTableToData(tableWithOutput);
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({
+				structured_data: JSON.stringify(tableWithoutOutput),
+			});
+		}
+
+		const requestBody = {
+			inputs: {
+				data: tableWithoutOutput,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = null;
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is (string | number)[] {
+		return Array.isArray(arg) && arg.every((x) => typeof x === "string" || typeof x === "number");
+	}
+
+	function parseOutput(body: unknown): (string | number)[] {
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<string | number>");
+	}
+
+	function highlightOutput(output: (string | number)[], colIndex: number): HighlightCoordinates {
+		const set: Set<string | number> = new Set(output);
+		const classes = set.size < COLORS.length ? Object.fromEntries([...set].map((cls, i) => [cls, i])) : {};
+
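+		// reuse one color per distinct predicted class when there are few classes;
+		// otherwise cycle through the palette by row index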
+		return Object.fromEntries(
+			output.map((row, rowIndex) => {
+				const colorIndex = classes[row] ?? mod(rowIndex, COLORS.length);
+				const color = COLORS[colorIndex];
+				return [
+					`${rowIndex}-${colIndex}`,
+					`bg-${color}-100 border-${color}-100 dark:bg-${color}-800 dark:border-${color}-800`,
+				];
+			})
+		);
+	}
+
+	function applyInputSample(sample: WidgetExampleStructuredDataInput, opts: ExampleRunOpts = {}) {
+		table = convertDataToTable(sample.structured_data);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isStructuredDataInput}
+	exampleQueryParams={["structured_data"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<div class="mt-4">
+				{#if table.length > 1 || table[1]?.length > 1}
+					<WidgetTableInput
+						{highlighted}
+						{isLoading}
+						{isDisabled}
+						onChange={onChangeTable}
+						table={tableWithOutput}
+						canAddCol={false}
+						bind:scrollTableToRight
+					/>
+				{/if}
+			</div>
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom" />
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..9a23bb5ad9f5590c2021e3c9b9d232b1f0f7f69a
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte
@@ -0,0 +1,272 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExampleTextInput,
+		WidgetExampleOutputText,
+		WidgetExample,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetShortcutRunLabel from "../../shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte";
+	import WidgetBloomDecoding from "../../shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetTimer from "../../shared/WidgetTimer/WidgetTimer.svelte";
+	import WidgetOutputText from "../../shared/WidgetOutputText/WidgetOutputText.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputText } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+	import type { PipelineType } from "@huggingface/tasks";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	export let isLoggedIn: boolean | undefined = undefined;
+
+	const isBloomLoginRequired = isLoggedIn === false && model.id === "bigscience/bloom";
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson: string;
+	let text = "";
+	let warning: string = "";
+	let renderTypingEffect: (outputTxt: string) => Promise<void>;
+	let inferenceTimer: WidgetTimer;
+	let setTextAreaValue: (text: string) => void;
+	let decodingStrategy: "sampling" | "greedy" = "sampling";
+
+	// Deactivate server caching for these two pipeline types
+	// (translation uses this widget too and still needs caching)
+	const useCache = !(["text-generation", "text2text-generation"] as Array<PipelineType>).includes(
+		model.pipeline_tag as PipelineType
+	);
+
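+	// Note: the `useCache` option of `getOutput` shadows the constant above;
+	// submit handlers pass the pipeline-dependent value explicitly.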
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		useCache = true,
+		exampleOutput = undefined,
+	}: InferenceRunOpts<WidgetExampleOutputText> = {}) {
+		if (isBloomLoginRequired) {
+			return;
+		}
+
+		if (exampleOutput) {
+			output = exampleOutput.text;
+			outputJson = "";
+			renderExampleOutput(output);
+			return;
+		}
+
+		const trimmedValue = text.trim();
+
+		if (!trimmedValue) {
+			error = "You need to input some text";
+			output = "";
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedValue });
+		}
+
+		const requestBody = { inputs: trimmedValue, parameters: {} as unknown };
+		addInferenceParameters(requestBody, model);
+
+		if (model.id === "bigscience/bloom") {
+			// see https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
+			const parameters = {
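+				// small pseudo-random seed (0-99) so successive sampling runs can differ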
+				seed: Date.now() % 100,
+				early_stopping: false,
+				length_penalty: 0.0,
+				max_new_tokens: 20,
+				...(decodingStrategy === "sampling" && {
+					top_p: 0.9,
+				}),
+				do_sample: decodingStrategy === "sampling",
+			};
+			requestBody["parameters"] = parameters;
+		}
+
+		isLoading = true;
+		inferenceTimer.start();
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall,
+			useCache
+		);
+
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No text was generated";
+			} else if (model?.pipeline_tag === "text-generation") {
+				const outputWithoutInput = output.slice(text.length);
+				inferenceTimer.stop();
+				if (outputWithoutInput.length === 0) {
+					warning = "No text was generated";
+				} else {
+					await renderTypingEffect(outputWithoutInput);
+				}
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true, useCache });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+
+		isLoading = false;
+		inferenceTimer.stop();
+	}
+
+	function parseOutput(body: unknown): string {
+		if (Array.isArray(body) && body.length) {
+			const firstEntry = body[0];
+			return (
+				firstEntry["generated_text"] ?? // text-generation + text2text-generation
+				firstEntry["translation_text"] ?? // translation
+				""
+			);
+		}
+		throw new TypeError("Invalid output: output must be of type Array & non-empty");
+	}
+
+	function renderExampleOutput(output: string) {
+		// if output doesn't start with space, add space in front of output
+		const prefix = /^\s/.test(output) ? "" : " ";
+		renderTypingEffect(prefix + output);
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput<WidgetExampleOutputText>, opts: ExampleRunOpts = {}) {
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			if (sample.output) {
+				outputJson = "";
+				output = sample.output.text;
+				renderExampleOutput(output);
+			} else {
+				output = "";
+				outputJson = "";
+			}
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, useCache, exampleOutput });
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleTextInput<WidgetExampleOutputText> {
+		return isTextInput(sample) && (!sample.output || isValidOutputText(sample.output));
+	}
+
+	function redirectLogin() {
+		window.location.href = `/login?next=${encodeURIComponent(window.location.href)}`;
+	}
+
+	function redirectJoin() {
+		window.location.href = `/join?next=${encodeURIComponent(window.location.href)}`;
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetTextarea
+				bind:value={text}
+				bind:setValue={setTextAreaValue}
+				{isLoading}
+				{isDisabled}
+				size="big"
+				bind:renderTypingEffect
+			/>
+			{#if model.id === "bigscience/bloom"}
+				<WidgetBloomDecoding bind:decodingStrategy />
+			{/if}
+			<div class="flex items-center gap-x-2 {isBloomLoginRequired ? 'pointer-events-none opacity-50' : ''}">
+				<WidgetSubmitBtn
+					{isLoading}
+					{isDisabled}
+					onClick={() => {
+						getOutput({ useCache });
+					}}
+				/>
+				<WidgetShortcutRunLabel {isLoading} {isDisabled} />
+				<div class="ml-auto self-start">
+					<WidgetTimer bind:this={inferenceTimer} {isDisabled} />
+				</div>
+			</div>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+			{#if isBloomLoginRequired}
+				<div class="alert alert-warning mt-2">
+					<!-- svelte-ignore a11y-click-events-have-key-events -->
+					Please
+					<span class="cursor-pointer underline" on:click={redirectLogin}>login</span>
+					or
+					<!-- svelte-ignore a11y-click-events-have-key-events -->
+					<span class="cursor-pointer underline" on:click={redirectJoin}>register</span> to try BLOOM 🌸
+				</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if model?.pipeline_tag !== "text-generation"}
+			<!-- for pipelines: text2text-generation & translation -->
+			<WidgetOutputText classNames="mt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ff2d09221de3c0400a2385eba4b1adcde6c38be1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte
@@ -0,0 +1,155 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type {
+		WidgetExampleTextInput,
+		WidgetExampleOutputUrl,
+		WidgetExample,
+	} from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isValidOutputUrl } from "$lib/components/InferenceWidget/shared/outputValidation.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson = "";
+	let text = "";
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		useCache = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts<WidgetExampleOutputUrl> = {}) {
+		if (exampleOutput) {
+			output = exampleOutput.url;
+			return;
+		}
+
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = { inputs: trimmedText };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall,
+			useCache
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error || `Error encountered on input "${trimmedText}"`;
+		}
+	}
+
+	function parseOutput(body: unknown): string {
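+		// the generated image comes back as binary data (a Blob); expose it via an object URL for the <img> tag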
+		if (body && typeof body === "object" && body instanceof Blob) {
+			return URL.createObjectURL(body);
+		}
+		throw new TypeError("Invalid output: output must be a Blob");
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput<WidgetExampleOutputUrl>, opts: ExampleRunOpts = {}) {
+		text = sample.text;
+		if (opts.isPreview) {
+			if (sample.output) {
+				output = sample.output.url;
+			} else {
+				output = "";
+			}
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	function validateExample(sample: WidgetExample): sample is WidgetExampleTextInput<WidgetExampleOutputUrl> {
+		return isTextInput(sample) && (!sample.output || isValidOutputUrl(sample.output));
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	{validateExample}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetQuickInput bind:value={text} {isLoading} {isDisabled} onClickSubmitBtn={() => getOutput()} />
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<div class="mt-4 flex justify-center bg-gray-50 dark:bg-gray-925">
+				<img class="max-w-sm object-contain" src={output} alt="" />
+			</div>
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..27c174be9671ac1460501249ba625a021e59fb3c
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte
@@ -0,0 +1,143 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetAudioTrack from "../../shared/WidgetAudioTrack/WidgetAudioTrack.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output = "";
+	let outputJson = "";
+	let text = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = { inputs: trimmedText };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = "";
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): string {
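+		// the API returns raw audio bytes (a Blob); wrap them in an object URL for the <audio> player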
+		if (body && typeof body === "object" && body instanceof Blob) {
+			return URL.createObjectURL(body);
+		}
+		throw new TypeError("Invalid output: output must be a Blob");
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextInput}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetTextarea bind:value={text} bind:setValue={setTextAreaValue} {isDisabled} />
+			<WidgetSubmitBtn
+				classNames="mt-2"
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<WidgetAudioTrack classNames="mt-4" src={output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..f0f9fb25834fb45903340cdbc737ec834b1b49d5
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte
@@ -0,0 +1,264 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetOutputTokens from "../../shared/WidgetOutputTokens/WidgetOutputTokens.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+	import { uniqBy } from "$lib/utils/ViewUtils.js";
+
+	interface EntityGroup {
+		entity_group: string;
+		score: number;
+		word: string;
+		start?: number;
+		end?: number;
+	}
+
+	interface Span {
+		end: number;
+		index?: string;
+		start: number;
+		type: string;
+	}
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Span[] = [];
+	let outputJson: string;
+	let text = "";
+	let outputText = "";
+	let warning: string = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedText = text.trim();
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({ text: trimmedText });
+		}
+
+		const requestBody = { inputs: trimmedText };
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			outputText = text;
+			if (output.length === 0) {
+				warning = "No token was detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function isValidOutput(arg: any): arg is EntityGroup[] {
+		return (
+			Array.isArray(arg) &&
+			arg.every((x) => {
+				return typeof x.word === "string" && typeof x.entity_group === "string" && typeof x.score === "number";
+			})
+		);
+	}
+
+	function parseOutput(body: unknown): Span[] {
+		if (isValidOutput(body)) {
+			// Filter out duplicates
+			const filteredEntries = uniqBy(body, (val) => JSON.stringify(val));
+
+			const spans: Span[] = [];
+			for (const entry of filteredEntries) {
+				const span = getSpanData(entry, spans, text);
+				if (span) {
+					spans.push(span);
+				}
+			}
+
+			spans.sort((a, b) => {
+				// sort by start position; when two spans start together, the longer one comes first
+				return a.start === b.start ? b.end - a.end : a.start - b.start;
+			});
+
+			// Check existence of **strict overlapping**
+			for (let i = 0; i < spans.length; i++) {
+				if (i < spans.length - 1) {
+					const s = spans[i];
+					const sNext = spans[i + 1];
+					if (s.start < sNext.start && s.end > sNext.start) {
+						console.warn("ERROR", "Spans: strict overlapping");
+					}
+				}
+			}
+
+			return spans;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<word:string; entity_group:string; score:number>");
+	}
+
+	function getSpanData(entityGroup: EntityGroup, spans: Span[], text: string): Span | null {
+		// When the API returns start/end information
+		// (compare against undefined: a `start` of 0 is valid)
+		if (entityGroup.start !== undefined && entityGroup.end !== undefined) {
+			const span = {
+				type: entityGroup.entity_group,
+				start: entityGroup.start,
+				end: entityGroup.end,
+			};
+			return !spans.some((x) => equals(x, span)) ? span : null;
+		}
+
+		// This is a fallback when the API doesn't return
+		// start/end information (when using python tokenizers for instance).
+		const normalizedText = text.toLowerCase();
+
+		let needle = entityGroup.word.toLowerCase();
+		let idx = 0;
+		while (idx !== -1) {
+			idx = normalizedText.indexOf(needle, idx);
+			if (idx === -1) {
+				break;
+			}
+			const span: Span = {
+				type: entityGroup.entity_group,
+				start: idx,
+				end: idx + needle.length,
+			};
+			if (!spans.some((x) => equals(x, span))) {
+				return span;
+			}
+			idx++;
+		}
+
+		// Fallback for incorrect detokenization in this pipeline,
+		// e.g. the model returns "John - Claude" for "John-Claude" in the text.
+		// todo: Fix upstream.
+		needle = entityGroup.word.toLowerCase().replace(/ /g, "");
+		idx = 0;
+		while (idx !== -1) {
+			idx = normalizedText.indexOf(needle, idx);
+			if (idx === -1) {
+				break;
+			}
+			const span: Span = {
+				type: entityGroup.entity_group,
+				start: idx,
+				end: idx + needle.length,
+			};
+			if (!spans.some((x) => equals(x, span))) {
+				return span;
+			}
+			idx++; // advance, otherwise indexOf keeps matching the same position forever
+		}
+		return null;
+	}
+
+	function equals(a: Span, b: Span): boolean {
+		return a.type === b.type && a.start === b.start && a.end === b.end;
+	}
+
+	function applyInputSample(sample: WidgetExampleTextInput, opts: ExampleRunOpts = {}) {
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isTextInput}
+	exampleQueryParams={["text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form>
+			<WidgetTextarea bind:value={text} bind:setValue={setTextAreaValue} {isDisabled} />
+			<WidgetSubmitBtn
+				classNames="mt-2"
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		<WidgetOutputTokens classNames="mt-2" {output} text={outputText} />
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..b37c6ea6d3042b5429b684a78bff1a6ec961133e
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte
@@ -0,0 +1,203 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetAndTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetQuickInput from "../../shared/WidgetQuickInput/WidgetQuickInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import { addInferenceParameters, callInferenceApi } from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetAndTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ answer: string; score: number }> | null = [];
+	let outputJson: string;
+	let question = "";
+	let imgSrc = "";
+	let imageBase64 = "";
+
+	async function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		await updateImageBase64(file);
+	}
+
+	function updateImageBase64(file: File | Blob): Promise<void> {
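+		// read the file as a data URL and keep only the base64 payload,
+		// which is what the inference request sends as `image`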
+		return new Promise((resolve, reject) => {
+			const fileReader: FileReader = new FileReader();
+			fileReader.onload = async () => {
+				try {
+					const imageBase64WithPrefix: string = fileReader.result as string;
+					imageBase64 = imageBase64WithPrefix.split(",")[1]; // remove prefix
+					isLoading = false;
+					resolve();
+				} catch (err) {
+					reject(err);
+				}
+			};
+			fileReader.onerror = (e) => reject(e);
+			isLoading = true;
+			fileReader.readAsDataURL(file);
+		});
+	}
+
+	function isValidOutput(arg: any): arg is { answer: string; score: number }[] {
+		return Array.isArray(arg) && arg.every((x) => typeof x.answer === "string" && typeof x.score === "number");
+	}
+
+	function parseOutput(body: unknown): Array<{ answer: string; score: number }> {
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<answer: string, score:number>");
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetAndTextInput, opts: ExampleRunOpts = {}) {
+		question = sample.text;
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			return;
+		}
+		const res = await fetch(imgSrc);
+		const blob = await res.blob();
+		await updateImageBase64(blob);
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedQuestion = question.trim();
+
+		if (!trimmedQuestion) {
+			error = "You need to input a question";
+			output = null;
+			outputJson = "";
+			return;
+		}
+
+		if (!imageBase64) {
+			error = "You need to upload an image";
+			output = null;
+			outputJson = "";
+			return;
+		}
+
+		const requestBody = {
+			inputs: { question: trimmedQuestion, image: imageBase64 },
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = null;
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetAndTextInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<img src={imgSrc} class="pointer-events-none mx-auto max-h-44 shadow" alt="" />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/tablet through CSS breakpoints -->
+			{#if imgSrc}
+				<div class="mb-2 flex justify-center bg-gray-50 dark:bg-gray-900 md:hidden">
+					<img src={imgSrc} class="pointer-events-none max-h-44" alt="" />
+				</div>
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			<WidgetQuickInput
+				bind:value={question}
+				{isLoading}
+				{isDisabled}
+				onClickSubmitBtn={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output}
+			<WidgetOutputChart labelField="answer" classNames="pt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..cb2d4ee898634ceef4875126c74f375ced0477bd
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte
@@ -0,0 +1,228 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleAssetAndZeroShotInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import { onMount } from "svelte";
+
+	import WidgetFileInput from "../../shared/WidgetFileInput/WidgetFileInput.svelte";
+	import WidgetDropzone from "../../shared/WidgetDropzone/WidgetDropzone.svelte";
+	import WidgetTextInput from "../../shared/WidgetTextInput/WidgetTextInput.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		getWidgetExample,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isAssetAndZeroShotInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	let isDisabled = false;
+
+	let candidateLabels = "";
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+	let imgSrc = "";
+	let imageBase64 = "";
+
+	async function onSelectFile(file: File | Blob) {
+		imgSrc = URL.createObjectURL(file);
+		await updateImageBase64(file);
+	}
+
+	function updateImageBase64(file: File | Blob): Promise<void> {
+		return new Promise((resolve, reject) => {
+			const fileReader: FileReader = new FileReader();
+			fileReader.onload = async () => {
+				try {
+					const imageBase64WithPrefix: string = fileReader.result as string;
+					imageBase64 = imageBase64WithPrefix.split(",")[1]; // strip the "data:<mime>;base64," prefix, keeping only the payload
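+					// e.g. (illustrative) "data:image/png;base64,iVBORw0KGg..." -> "iVBORw0KGg..."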
+					isLoading = false;
+					resolve();
+				} catch (err) {
+					reject(err);
+				}
+			};
+			fileReader.onerror = (e) => reject(e);
+			isLoading = true;
+			fileReader.readAsDataURL(file);
+		});
+	}
+
+	function isValidOutput(arg: any): arg is { label: string; score: number }[] {
+		return Array.isArray(arg) && arg.every((x) => typeof x.label === "string" && typeof x.score === "number");
+	}
+
+	function parseOutput(body: unknown): Array<{ label: string; score: number }> {
+		if (isValidOutput(body)) {
+			return body;
+		}
+		throw new TypeError("Invalid output: output must be of type Array<{ label: string; score: number }>");
+	}
+
+	async function applyInputSample(sample: WidgetExampleAssetAndZeroShotInput, opts: ExampleRunOpts = {}) {
+		candidateLabels = sample.candidate_labels;
+		imgSrc = sample.src;
+		if (opts.isPreview) {
+			return;
+		}
+		const res = await fetch(imgSrc);
+		const blob = await res.blob();
+		await updateImageBase64(blob);
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedCandidateLabels = candidateLabels.trim().split(",").map((label) => label.trim()).join(",");
+
+		if (!trimmedCandidateLabels) {
+			error = "You need to input at least one label";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (!imageBase64) {
+			error = "You need to upload an image";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		const requestBody = {
+			image: imageBase64,
+			parameters: {
+				candidate_labels: trimmedCandidateLabels,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	onMount(() => {
+		(async () => {
+			const example = getWidgetExample<WidgetExampleAssetAndZeroShotInput>(model, isAssetAndZeroShotInput);
+			if (callApiOnMount && example) {
+				await applyInputSample(example, { inferenceOpts: { isOnLoadCall: true } });
+			}
+		})();
+	});
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isAssetAndZeroShotInput}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="space-y-2">
+			<WidgetDropzone
+				classNames="hidden md:block"
+				{isLoading}
+				{isDisabled}
+				{imgSrc}
+				{onSelectFile}
+				onError={(e) => (error = e)}
+			>
+				{#if imgSrc}
+					<img src={imgSrc} class="pointer-events-none mx-auto max-h-44 shadow" alt="" />
+				{/if}
+			</WidgetDropzone>
+			<!-- Better UX for mobile/tablet through CSS breakpoints -->
+			{#if imgSrc}
+				<div class="mb-2 flex justify-center bg-gray-50 dark:bg-gray-900 md:hidden">
+					<img src={imgSrc} class="pointer-events-none max-h-44" alt="" />
+				</div>
+			{/if}
+			<WidgetFileInput
+				accept="image/*"
+				classNames="mr-2 md:hidden"
+				{isLoading}
+				{isDisabled}
+				label="Browse for image"
+				{onSelectFile}
+			/>
+			<WidgetTextInput
+				bind:value={candidateLabels}
+				{isDisabled}
+				label="Possible class names (comma-separated)"
+				placeholder="Possible class names..."
+			/>
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<WidgetOutputChart classNames="pt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotClassificationWidget/ZeroShotClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotClassificationWidget/ZeroShotClassificationWidget.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..d3742b8ca2ada1f0136239401ee22b0eafc4b671
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotClassificationWidget/ZeroShotClassificationWidget.svelte
@@ -0,0 +1,200 @@
+<script lang="ts">
+	import type { WidgetProps, ExampleRunOpts, InferenceRunOpts } from "$lib/components/InferenceWidget/shared/types.js";
+	import type { WidgetExampleZeroShotTextInput } from "$lib/components/InferenceWidget/shared/WidgetExample.js";
+
+	import WidgetCheckbox from "../../shared/WidgetCheckbox/WidgetCheckbox.svelte";
+	import WidgetOutputChart from "../../shared/WidgetOutputChart/WidgetOutputChart.svelte";
+	import WidgetSubmitBtn from "../../shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte";
+	import WidgetTextarea from "../../shared/WidgetTextarea/WidgetTextarea.svelte";
+	import WidgetTextInput from "../../shared/WidgetTextInput/WidgetTextInput.svelte";
+	import WidgetWrapper from "../../shared/WidgetWrapper/WidgetWrapper.svelte";
+	import {
+		addInferenceParameters,
+		callInferenceApi,
+		updateUrl,
+	} from "$lib/components/InferenceWidget/shared/helpers.js";
+	import { isZeroShotTextInput } from "$lib/components/InferenceWidget/shared/inputValidation.js";
+
+	export let apiToken: WidgetProps["apiToken"];
+	export let apiUrl: WidgetProps["apiUrl"];
+	export let callApiOnMount: WidgetProps["callApiOnMount"];
+	export let model: WidgetProps["model"];
+	export let noTitle: WidgetProps["noTitle"];
+	export let shouldUpdateUrl: WidgetProps["shouldUpdateUrl"];
+	export let includeCredentials: WidgetProps["includeCredentials"];
+	let isDisabled = false;
+
+	let candidateLabels = "";
+	let computeTime = "";
+	let error: string = "";
+	let isLoading = false;
+	let modelLoading = {
+		isLoading: false,
+		estimatedTime: 0,
+	};
+	let multiClass = false;
+	let output: Array<{ label: string; score: number }> = [];
+	let outputJson: string;
+	let text = "";
+	let warning: string = "";
+	let setTextAreaValue: (text: string) => void;
+
+	async function getOutput({
+		withModelLoading = false,
+		isOnLoadCall = false,
+		exampleOutput = undefined,
+	}: InferenceRunOpts = {}) {
+		const trimmedText = text.trim();
+		const trimmedCandidateLabels = candidateLabels.trim().split(",").map((label) => label.trim()).join(",");
+
+		if (!trimmedText) {
+			error = "You need to input some text";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (!trimmedCandidateLabels) {
+			error = "You need to input at least one label";
+			output = [];
+			outputJson = "";
+			return;
+		}
+
+		if (shouldUpdateUrl && !isOnLoadCall) {
+			updateUrl({
+				candidate_labels: trimmedCandidateLabels,
+				multi_class: multiClass ? "true" : "false",
+				text: trimmedText,
+			});
+		}
+
+		const requestBody = {
+			inputs: trimmedText,
+			parameters: {
+				candidate_labels: trimmedCandidateLabels,
+				multi_class: multiClass,
+			},
+		};
+		addInferenceParameters(requestBody, model);
+
+		isLoading = true;
+
+		const res = await callInferenceApi(
+			apiUrl,
+			model.id,
+			requestBody,
+			apiToken,
+			parseOutput,
+			withModelLoading,
+			includeCredentials,
+			isOnLoadCall
+		);
+
+		isLoading = false;
+		// Reset values
+		computeTime = "";
+		error = "";
+		warning = "";
+		modelLoading = { isLoading: false, estimatedTime: 0 };
+		output = [];
+		outputJson = "";
+
+		if (res.status === "success") {
+			computeTime = res.computeTime;
+			output = res.output;
+			outputJson = res.outputJson;
+			if (output.length === 0) {
+				warning = "No classes were detected";
+			}
+		} else if (res.status === "loading-model") {
+			modelLoading = {
+				isLoading: true,
+				estimatedTime: res.estimatedTime,
+			};
+			getOutput({ withModelLoading: true });
+		} else if (res.status === "error") {
+			error = res.error;
+		}
+	}
+
+	function parseOutput(body: unknown): Array<{ label: string; score: number }> {
+		if (
+			body &&
+			typeof body === "object" &&
+			"labels" in body &&
+			Array.isArray(body["labels"]) &&
+			"scores" in body &&
+			Array.isArray(body["scores"])
+		) {
+			const scores = body["scores"];
+			return body["labels"]
+				.filter((_, i) => scores[i] !== null && scores[i] !== undefined)
+				.map((x, i) => ({
+					label: x ?? "",
+					score: scores[i] ?? 0,
+				}));
+		}
+		throw new TypeError("Invalid output: output must be of type { labels: Array; scores: Array }");
+	}
+
+	function applyInputSample(sample: WidgetExampleZeroShotTextInput, opts: ExampleRunOpts = {}) {
+		candidateLabels = sample.candidate_labels;
+		multiClass = sample.multi_class;
+		setTextAreaValue(sample.text);
+		if (opts.isPreview) {
+			return;
+		}
+		const exampleOutput = sample.output;
+		getOutput({ ...opts.inferenceOpts, exampleOutput });
+	}
+</script>
+
+<WidgetWrapper
+	{callApiOnMount}
+	{apiUrl}
+	{includeCredentials}
+	{applyInputSample}
+	{computeTime}
+	{error}
+	{isLoading}
+	{model}
+	{modelLoading}
+	{noTitle}
+	{outputJson}
+	validateExample={isZeroShotTextInput}
+	exampleQueryParams={["candidate_labels", "multi_class", "text"]}
+>
+	<svelte:fragment slot="top" let:isDisabled>
+		<form class="flex flex-col space-y-2">
+			<WidgetTextarea
+				bind:value={text}
+				bind:setValue={setTextAreaValue}
+				{isDisabled}
+				placeholder="Text to classify..."
+			/>
+			<WidgetTextInput
+				bind:value={candidateLabels}
+				{isDisabled}
+				label="Possible class names (comma-separated)"
+				placeholder="Possible class names..."
+			/>
+			<WidgetCheckbox bind:checked={multiClass} label="Allow multiple true classes" />
+			<WidgetSubmitBtn
+				{isLoading}
+				{isDisabled}
+				onClick={() => {
+					getOutput();
+				}}
+			/>
+			{#if warning}
+				<div class="alert alert-warning mt-2">{warning}</div>
+			{/if}
+		</form>
+	</svelte:fragment>
+	<svelte:fragment slot="bottom">
+		{#if output.length}
+			<WidgetOutputChart classNames="pt-4" {output} />
+		{/if}
+	</svelte:fragment>
+</WidgetWrapper>
diff --git a/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte b/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..9900197635e123eaaa27bdcd4a448a7716cde63f
--- /dev/null
+++ b/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte
@@ -0,0 +1,89 @@
+<script lang="ts">
+	import type { SvelteComponent } from "svelte";
+
+	import IconAudioClassification from "../Icons/IconAudioClassification.svelte";
+	import IconAudioToAudio from "../Icons/IconAudioToAudio.svelte";
+	import IconAutomaticSpeechRecognition from "../Icons/IconAutomaticSpeechRecognition.svelte";
+	import IconConversational from "../Icons/IconConversational.svelte";
+	import IconFeatureExtraction from "../Icons/IconFeatureExtraction.svelte";
+	import IconFillMask from "../Icons/IconFillMask.svelte";
+	import IconImageClassification from "../Icons/IconImageClassification.svelte";
+	import IconImageSegmentation from "../Icons/IconImageSegmentation.svelte";
+	import IconObjectDetection from "../Icons/IconObjectDetection.svelte";
+	import IconDepthEstimation from "../Icons/IconDepthEstimation.svelte";
+	import IconVideoClassification from "../Icons/IconVideoClassification.svelte";
+	import IconQuestionAnswering from "../Icons/IconQuestionAnswering.svelte";
+	import IconSentenceSimilarity from "../Icons/IconSentenceSimilarity.svelte";
+	import IconTabularClassification from "../Icons/IconTabularClassification.svelte";
+	import IconTabularRegression from "../Icons/IconTabularRegression.svelte";
+	import IconSummarization from "../Icons/IconSummarization.svelte";
+	import IconTableQuestionAnswering from "../Icons/IconTableQuestionAnswering.svelte";
+	import IconText2textGeneration from "../Icons/IconText2textGeneration.svelte";
+	import IconTextClassification from "../Icons/IconTextClassification.svelte";
+	import IconTextGeneration from "../Icons/IconTextGeneration.svelte";
+	import IconTextToImage from "../Icons/IconTextToImage.svelte";
+	import IconImageToText from "../Icons/IconImageToText.svelte";
+	import IconTextToSpeech from "../Icons/IconTextToSpeech.svelte";
+	import IconTextToVideo from "../Icons/IconTextToVideo.svelte";
+	import IconTokenClassification from "../Icons/IconTokenClassification.svelte";
+	import IconTranslation from "../Icons/IconTranslation.svelte";
+	import IconVoiceActivityDetection from "../Icons/IconVoiceActivityDetection.svelte";
+	import IconZeroShotClassification from "../Icons/IconZeroShotClassification.svelte";
+	import IconReinforcementLearning from "../Icons/IconReinforcementLearning.svelte";
+	import IconRobotics from "../Icons/IconRobotics.svelte";
+	import IconImageToImage from "../Icons/IconImageToImage.svelte";
+	import IconUnconditionalImageGeneration from "../Icons/IconUnconditionalImageGeneration.svelte";
+	import IconDocumentQuestionAnswering from "../Icons/IconDocumentQuestionAnswering.svelte";
+	import IconGraphML from "../Icons/IconGraphML.svelte";
+	import type { PipelineType } from "@huggingface/tasks";
+
+	export let classNames = "";
+	export let pipeline = "";
+
+	const ICON_COMPONENTS: {
+		[key in PipelineType]?: typeof SvelteComponent;
+	} = {
+		/// Keep same order as in huggingface_hub/Types.ts
+		/// for easy mapping.
+		"text-classification": IconTextClassification,
+		"token-classification": IconTokenClassification,
+		"table-question-answering": IconTableQuestionAnswering,
+		"question-answering": IconQuestionAnswering,
+		"zero-shot-classification": IconZeroShotClassification,
+		translation: IconTranslation,
+		summarization: IconSummarization,
+		conversational: IconConversational,
+		"feature-extraction": IconFeatureExtraction,
+		"text-generation": IconTextGeneration,
+		"text2text-generation": IconText2textGeneration,
+		"fill-mask": IconFillMask,
+		"sentence-similarity": IconSentenceSimilarity,
+		"text-to-speech": IconTextToSpeech,
+		"text-to-audio": IconTextToSpeech,
+		"automatic-speech-recognition": IconAutomaticSpeechRecognition,
+		"audio-to-audio": IconAudioToAudio,
+		"audio-classification": IconAudioClassification,
+		"voice-activity-detection": IconVoiceActivityDetection,
+		"depth-estimation": IconDepthEstimation,
+		"image-classification": IconImageClassification,
+		"object-detection": IconObjectDetection,
+		"video-classification": IconVideoClassification,
+		"image-segmentation": IconImageSegmentation,
+		"text-to-image": IconTextToImage,
+		"image-to-text": IconImageToText,
+		"image-to-image": IconImageToImage,
+		"unconditional-image-generation": IconUnconditionalImageGeneration,
+		"reinforcement-learning": IconReinforcementLearning,
+		robotics: IconRobotics,
+		"graph-ml": IconGraphML,
+		"tabular-classification": IconTabularClassification,
+		"tabular-regression": IconTabularRegression,
+		"text-to-video": IconTextToVideo,
+		"document-question-answering": IconDocumentQuestionAnswering,
+	};
+
+	$: iconComponent =
+		pipeline in ICON_COMPONENTS ? ICON_COMPONENTS[pipeline as keyof typeof ICON_COMPONENTS] : IconFillMask;
+</script>
+
+<svelte:component this={iconComponent} {classNames} />
diff --git a/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte b/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..dc2afad8a113e4c670ffa88a37a7c68a2e48bce6
--- /dev/null
+++ b/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte
@@ -0,0 +1,16 @@
+<script lang="ts">
+	import { PIPELINE_DATA } from "@huggingface/tasks";
+	import PipelineIcon from "../PipelineIcon/PipelineIcon.svelte";
+
+	export let classNames = "";
+	export let pipeline = "";
+
+	$: pipelineData = pipeline in PIPELINE_DATA ? PIPELINE_DATA[pipeline as keyof typeof PIPELINE_DATA] : undefined;
+</script>
+
+<div class="inline-flex items-center {classNames}">
+	<PipelineIcon classNames="mr-1" {pipeline} />
+	<span>
+		{pipelineData ? pipelineData.name : pipeline}
+	</span>
+</div>
diff --git a/packages/widgets/src/lib/index.ts b/packages/widgets/src/lib/index.ts
new file mode 100644
index 0000000000000000000000000000000000000000..a37f99cf4ef61455cff91195fe1f7fdc83a89f12
--- /dev/null
+++ b/packages/widgets/src/lib/index.ts
@@ -0,0 +1,38 @@
+import InferenceWidget from "./components/InferenceWidget/InferenceWidget.svelte";
+import WidgetOutputChart from "./components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte";
+import WidgetOutputTokens from "./components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte";
+import PipelineIcon from "./components/PipelineIcon/PipelineIcon.svelte";
+import { modelLoadStates } from "./components/InferenceWidget/stores.js";
+import { InferenceDisplayability } from "./interfaces/InferenceDisplayability.js";
+import * as serveCurl from "./inferenceSnippets/serveCurl.js";
+import * as serveJs from "./inferenceSnippets/serveJs.js";
+import * as servePython from "./inferenceSnippets/servePython.js";
+import * as snippetInputs from "./inferenceSnippets/inputs.js";
+import { MODEL_LIBRARIES_UI_ELEMENTS } from "./interfaces/Libraries.js";
+import type { LibraryUiElement } from "./interfaces/Libraries.js";
+import type { TransformersInfo } from "./interfaces/Types.js";
+import { MAPPING_DEFAULT_WIDGET } from "./interfaces/DefaultWidget.js";
+import { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "./interfaces/LibrariesToTasks.js";
+
+export {
+	InferenceWidget,
+	WidgetOutputChart,
+	WidgetOutputTokens,
+	modelLoadStates,
+	InferenceDisplayability,
+	PipelineIcon,
+	serveCurl,
+	serveJs,
+	servePython,
+	snippetInputs,
+	MODEL_LIBRARIES_UI_ELEMENTS,
+	MAPPING_DEFAULT_WIDGET,
+	LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS,
+};
+export type {
+	WidgetExample,
+	WidgetExampleOutput,
+	WidgetExampleOutputUrl,
+	WidgetExampleTextInput,
+} from "./components/InferenceWidget/shared/WidgetExample.js";
+export type { LibraryUiElement, TransformersInfo };
diff --git a/packages/widgets/src/lib/inferenceSnippets/inputs.ts b/packages/widgets/src/lib/inferenceSnippets/inputs.ts
new file mode 100644
index 0000000000000000000000000000000000000000..64e8c4c4173579dda9f1e3a9f83265b6903bbe96
--- /dev/null
+++ b/packages/widgets/src/lib/inferenceSnippets/inputs.ts
@@ -0,0 +1,129 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { ModelData } from "../interfaces/Types.js";
+
+const inputsZeroShotClassification = () =>
+	`"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`;
+
+const inputsTranslation = () => `"Меня зовут Вольфганг и я живу в Берлине"`;
+
+const inputsSummarization = () =>
+	`"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`;
+
+const inputsConversational = () =>
+	`{
+		"past_user_inputs": ["Which movie is the best ?"],
+		"generated_responses": ["It is Die Hard for sure."],
+		"text": "Can you explain why ?"
+	}`;
+
+const inputsTableQuestionAnswering = () =>
+	`{
+		"query": "How many stars does the transformers repository have?",
+		"table": {
+			"Repository": ["Transformers", "Datasets", "Tokenizers"],
+			"Stars": ["36542", "4512", "3934"],
+			"Contributors": ["651", "77", "34"],
+			"Programming language": [
+				"Python",
+				"Python",
+				"Rust, Python and NodeJS"
+			]
+		}
+	}`;
+
+const inputsQuestionAnswering = () =>
+	`{
+		"question": "What is my name?",
+		"context": "My name is Clara and I live in Berkeley."
+	}`;
+
+const inputsTextClassification = () => `"I like you. I love you"`;
+
+const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`;
+
+const inputsTextGeneration = () => `"Can you please let us know more details about your "`;
+
+const inputsText2TextGeneration = () => `"The answer to the universe is"`;
+
+const inputsFillMask = (model: ModelData) => `"The answer to the universe is ${model.mask_token}."`;
+
+const inputsSentenceSimilarity = () =>
+	`{
+		"source_sentence": "That is a happy person",
+		"sentences": [
+			"That is a happy dog",
+			"That is a very happy person",
+			"Today is a sunny day"
+		]
+	}`;
+
+const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`;
+
+const inputsImageClassification = () => `"cats.jpg"`;
+
+const inputsImageToText = () => `"cats.jpg"`;
+
+const inputsImageSegmentation = () => `"cats.jpg"`;
+
+const inputsObjectDetection = () => `"cats.jpg"`;
+
+const inputsAudioToAudio = () => `"sample1.flac"`;
+
+const inputsAudioClassification = () => `"sample1.flac"`;
+
+const inputsTextToImage = () => `"Astronaut riding a horse"`;
+
+const inputsTextToSpeech = () => `"The answer to the universe is 42"`;
+
+const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`;
+
+const inputsAutomaticSpeechRecognition = () => `"sample1.flac"`;
+
+const modelInputSnippets: {
+	[key in PipelineType]?: (model: ModelData) => string;
+} = {
+	"audio-to-audio": inputsAudioToAudio,
+	"audio-classification": inputsAudioClassification,
+	"automatic-speech-recognition": inputsAutomaticSpeechRecognition,
+	conversational: inputsConversational,
+	"feature-extraction": inputsFeatureExtraction,
+	"fill-mask": inputsFillMask,
+	"image-classification": inputsImageClassification,
+	"image-to-text": inputsImageToText,
+	"image-segmentation": inputsImageSegmentation,
+	"object-detection": inputsObjectDetection,
+	"question-answering": inputsQuestionAnswering,
+	"sentence-similarity": inputsSentenceSimilarity,
+	summarization: inputsSummarization,
+	"table-question-answering": inputsTableQuestionAnswering,
+	"text-classification": inputsTextClassification,
+	"text-generation": inputsTextGeneration,
+	"text-to-image": inputsTextToImage,
+	"text-to-speech": inputsTextToSpeech,
+	"text-to-audio": inputsTextToAudio,
+	"text2text-generation": inputsText2TextGeneration,
+	"token-classification": inputsTokenClassification,
+	translation: inputsTranslation,
+	"zero-shot-classification": inputsZeroShotClassification,
+};
+
+// Use noWrap to put the whole snippet on a single line (removing newlines and tabs)
+// Use noQuotes to strip quotes from start & end (example: "abc" -> abc)
+export function getModelInputSnippet(model: ModelData, noWrap = false, noQuotes = false): string {
+	if (model.pipeline_tag) {
+		const inputs = modelInputSnippets[model.pipeline_tag];
+		if (inputs) {
+			let result = inputs(model);
+			if (noWrap) {
+				result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " ");
+			}
+			if (noQuotes) {
+				const REGEX_QUOTES = /^"(.+)"$/s;
+				const match = result.match(REGEX_QUOTES);
+				result = match ? match[1] : result;
+			}
+			return result;
+		}
+	}
+	return "No input example has been defined for this model task.";
+}
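+
+// Minimal usage sketch (illustrative only; the ModelData fields below are assumed for the example):
+//
+//   const model = { id: "my-model", pipeline_tag: "fill-mask", mask_token: "[MASK]" } as ModelData;
+//   getModelInputSnippet(model);             // '"The answer to the universe is [MASK]."'
+//   getModelInputSnippet(model, true, true); // 'The answer to the universe is [MASK].'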
diff --git a/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts b/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7a801e934f84c7843ab074df1f48a7e679d7a36c
--- /dev/null
+++ b/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts
@@ -0,0 +1,63 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { ModelData } from "../interfaces/Types.js";
+import { getModelInputSnippet } from "./inputs.js";
+
+export const snippetBasic = (model: ModelData, accessToken: string): string =>
+	`curl https://api-inference.huggingface.co/models/${model.id} \\
+	-X POST \\
+	-d '{"inputs": ${getModelInputSnippet(model, true)}}' \\
+	-H 'Content-Type: application/json' \\
+	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
+`;
+
+export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
+	`curl https://api-inference.huggingface.co/models/${model.id} \\
+	-X POST \\
+	-d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\
+	-H 'Content-Type: application/json' \\
+	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
+`;
+
+export const snippetFile = (model: ModelData, accessToken: string): string =>
+	`curl https://api-inference.huggingface.co/models/${model.id} \\
+	-X POST \\
+	--data-binary '@${getModelInputSnippet(model, true, true)}' \\
+	-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"
+`;
+
+export const curlSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
+	// Same order as in js/src/lib/interfaces/Types.ts
+	"text-classification": snippetBasic,
+	"token-classification": snippetBasic,
+	"table-question-answering": snippetBasic,
+	"question-answering": snippetBasic,
+	"zero-shot-classification": snippetZeroShotClassification,
+	translation: snippetBasic,
+	summarization: snippetBasic,
+	conversational: snippetBasic,
+	"feature-extraction": snippetBasic,
+	"text-generation": snippetBasic,
+	"text2text-generation": snippetBasic,
+	"fill-mask": snippetBasic,
+	"sentence-similarity": snippetBasic,
+	"automatic-speech-recognition": snippetFile,
+	"text-to-image": snippetBasic,
+	"text-to-speech": snippetBasic,
+	"text-to-audio": snippetBasic,
+	"audio-to-audio": snippetFile,
+	"audio-classification": snippetFile,
+	"image-classification": snippetFile,
+	"image-to-text": snippetFile,
+	"object-detection": snippetFile,
+	"image-segmentation": snippetFile,
+};
+
+export function getCurlInferenceSnippet(model: ModelData, accessToken: string): string {
+	return model.pipeline_tag && model.pipeline_tag in curlSnippets
+		? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
+		: "";
+}
+
+export function hasCurlInferenceSnippet(model: ModelData): boolean {
+	return !!model.pipeline_tag && model.pipeline_tag in curlSnippets;
+}
diff --git a/packages/widgets/src/lib/inferenceSnippets/serveJs.ts b/packages/widgets/src/lib/inferenceSnippets/serveJs.ts
new file mode 100644
index 0000000000000000000000000000000000000000..ef94c30645a68d6a62f5f4d23427346555d3fd7b
--- /dev/null
+++ b/packages/widgets/src/lib/inferenceSnippets/serveJs.ts
@@ -0,0 +1,150 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { ModelData } from "../interfaces/Types.js";
+import { getModelInputSnippet } from "./inputs.js";
+
+export const snippetBasic = (model: ModelData, accessToken: string): string =>
+	`async function query(data) {
+	const response = await fetch(
+		"https://api-inference.huggingface.co/models/${model.id}",
+		{
+			headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
+			method: "POST",
+			body: JSON.stringify(data),
+		}
+	);
+	const result = await response.json();
+	return result;
+}
+
+query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
+	console.log(JSON.stringify(response));
+});`;
+
+export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string =>
+	`async function query(data) {
+	const response = await fetch(
+		"https://api-inference.huggingface.co/models/${model.id}",
+		{
+			headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
+			method: "POST",
+			body: JSON.stringify(data),
+		}
+	);
+	const result = await response.json();
+	return result;
+}
+
+query({"inputs": ${getModelInputSnippet(
+		model
+	)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => {
+	console.log(JSON.stringify(response));
+});`;
+
+export const snippetTextToImage = (model: ModelData, accessToken: string): string =>
+	`async function query(data) {
+	const response = await fetch(
+		"https://api-inference.huggingface.co/models/${model.id}",
+		{
+			headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
+			method: "POST",
+			body: JSON.stringify(data),
+		}
+	);
+	const result = await response.blob();
+	return result;
+}
+query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
+	// Use image
+});`;
+
+export const snippetTextToAudio = (model: ModelData, accessToken: string): string => {
+	const commonSnippet = `async function query(data) {
+		const response = await fetch(
+			"https://api-inference.huggingface.co/models/${model.id}",
+			{
+				headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
+				method: "POST",
+				body: JSON.stringify(data),
+			}
+		);`;
+	if (model.library_name === "transformers") {
+		return (
+			commonSnippet +
+			`
+			const result = await response.blob();
+			return result;
+		}
+		query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
+			// Returns a Blob containing the audio waveform. Use it directly!
+		});`
+		);
+	} else {
+		return (
+			commonSnippet +
+			`
+			const result = await response.json();
+			return result;
+		}
+
+		query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
+			console.log(JSON.stringify(response));
+		});`
+		);
+	}
+};
+
+export const snippetFile = (model: ModelData, accessToken: string): string =>
+	`async function query(filename) {
+	const data = fs.readFileSync(filename);
+	const response = await fetch(
+		"https://api-inference.huggingface.co/models/${model.id}",
+		{
+			headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" },
+			method: "POST",
+			body: data,
+		}
+	);
+	const result = await response.json();
+	return result;
+}
+
+query(${getModelInputSnippet(model)}).then((response) => {
+	console.log(JSON.stringify(response));
+});`;
+
+export const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>> = {
+	// Same order as in js/src/lib/interfaces/Types.ts
+	"text-classification": snippetBasic,
+	"token-classification": snippetBasic,
+	"table-question-answering": snippetBasic,
+	"question-answering": snippetBasic,
+	"zero-shot-classification": snippetZeroShotClassification,
+	translation: snippetBasic,
+	summarization: snippetBasic,
+	conversational: snippetBasic,
+	"feature-extraction": snippetBasic,
+	"text-generation": snippetBasic,
+	"text2text-generation": snippetBasic,
+	"fill-mask": snippetBasic,
+	"sentence-similarity": snippetBasic,
+	"automatic-speech-recognition": snippetFile,
+	"text-to-image": snippetTextToImage,
+	"text-to-speech": snippetTextToAudio,
+	"text-to-audio": snippetTextToAudio,
+	"audio-to-audio": snippetFile,
+	"audio-classification": snippetFile,
+	"image-classification": snippetFile,
+	"image-to-text": snippetFile,
+	"object-detection": snippetFile,
+	"image-segmentation": snippetFile,
+};
+
+export function getJsInferenceSnippet(model: ModelData, accessToken: string): string {
+	return model.pipeline_tag && model.pipeline_tag in jsSnippets
+		? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? ""
+		: "";
+}
+
+export function hasJsInferenceSnippet(model: ModelData): boolean {
+	return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
+}
diff --git a/packages/widgets/src/lib/inferenceSnippets/servePython.ts b/packages/widgets/src/lib/inferenceSnippets/servePython.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5ddbeabeb178164149565752336d1ede251d1a51
--- /dev/null
+++ b/packages/widgets/src/lib/inferenceSnippets/servePython.ts
@@ -0,0 +1,114 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { ModelData } from "../interfaces/Types.js";
+import { getModelInputSnippet } from "./inputs.js";
+
+export const snippetZeroShotClassification = (model: ModelData): string =>
+	`def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.json()
+
+output = query({
+    "inputs": ${getModelInputSnippet(model)},
+    "parameters": {"candidate_labels": ["refund", "legal", "faq"]},
+})`;
+
+export const snippetBasic = (model: ModelData): string =>
+	`def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.json()
+
+output = query({
+	"inputs": ${getModelInputSnippet(model)},
+})`;
+
+export const snippetFile = (model: ModelData): string =>
+	`def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.post(API_URL, headers=headers, data=data)
+    return response.json()
+
+output = query(${getModelInputSnippet(model)})`;
+
+export const snippetTextToImage = (model: ModelData): string =>
+	`def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.content
+image_bytes = query({
+	"inputs": ${getModelInputSnippet(model)},
+})
+# You can access the image with PIL.Image for example
+import io
+from PIL import Image
+image = Image.open(io.BytesIO(image_bytes))`;
+
+export const snippetTextToAudio = (model: ModelData): string => {
+	// The Transformers TTS pipeline and the api-inference-community (AIC) pipeline outputs
+	// have diverged since the latest update to the Inference API (IA).
+	// Transformers models served through IA return a byte object (a wav file), whereas AIC returns wav and sampling_rate.
+	if (model.library_name === "transformers") {
+		return `def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.content
+
+audio_bytes = query({
+	"inputs": ${getModelInputSnippet(model)},
+})
+# You can access the audio with IPython.display for example
+from IPython.display import Audio
+Audio(audio_bytes)`;
+	} else {
+		return `def query(payload):
+	response = requests.post(API_URL, headers=headers, json=payload)
+	return response.json()
+
+audio, sampling_rate = query({
+	"inputs": ${getModelInputSnippet(model)},
+})
+# You can access the audio with IPython.display for example
+from IPython.display import Audio
+Audio(audio, rate=sampling_rate)`;
+	}
+};
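+
+// For illustration (response shapes inferred from the comment above, not from an API spec):
+// a "transformers"-served model answers with raw audio bytes, so the generated snippet reads
+// `response.content`; other backends answer with JSON that the second snippet unpacks as
+// `audio, sampling_rate`.
+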
+export const pythonSnippets: Partial<Record<PipelineType, (model: ModelData) => string>> = {
+	// Same order as in js/src/lib/interfaces/Types.ts
+	"text-classification": snippetBasic,
+	"token-classification": snippetBasic,
+	"table-question-answering": snippetBasic,
+	"question-answering": snippetBasic,
+	"zero-shot-classification": snippetZeroShotClassification,
+	translation: snippetBasic,
+	summarization: snippetBasic,
+	conversational: snippetBasic,
+	"feature-extraction": snippetBasic,
+	"text-generation": snippetBasic,
+	"text2text-generation": snippetBasic,
+	"fill-mask": snippetBasic,
+	"sentence-similarity": snippetBasic,
+	"automatic-speech-recognition": snippetFile,
+	"text-to-image": snippetTextToImage,
+	"text-to-speech": snippetTextToAudio,
+	"text-to-audio": snippetTextToAudio,
+	"audio-to-audio": snippetFile,
+	"audio-classification": snippetFile,
+	"image-classification": snippetFile,
+	"image-to-text": snippetFile,
+	"object-detection": snippetFile,
+	"image-segmentation": snippetFile,
+};
+
+export function getPythonInferenceSnippet(model: ModelData, accessToken: string): string {
+	const body =
+		model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : "";
+
+	return `import requests
+
+API_URL = "https://api-inference.huggingface.co/models/${model.id}"
+headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}}
+
+${body}`;
+}
+
+export function hasPythonInferenceSnippet(model: ModelData): boolean {
+	return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets;
+}
diff --git a/packages/widgets/src/lib/interfaces/DefaultWidget.ts b/packages/widgets/src/lib/interfaces/DefaultWidget.ts
new file mode 100644
index 0000000000000000000000000000000000000000..e7ad1962157a8eecf7d88c0b81ae735924fbf3e9
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/DefaultWidget.ts
@@ -0,0 +1,718 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { WidgetExample } from "../components/InferenceWidget/shared/WidgetExample.js";
+
+type LanguageCode = string;
+
+type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
+
+/// NOTE TO CONTRIBUTORS:
+///
+/// When adding sample inputs for a new language, you don't
+/// necessarily have to translate the inputs from existing languages
+/// (which were quite random to begin with).
+///
+/// In other words, feel free to be creative and provide better samples.
+
+/// The <mask> placeholder will be replaced by the correct mask token
+/// in the following examples, depending on the model type;
+/// see [INTERNAL] github.com/huggingface/moon-landing/blob/c5c3d45fe0ab27347b3ab27bdad646ef20732351/server/lib/App.ts#L254
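+///
+/// For instance (illustrative), for a BERT-style model whose mask token is "[MASK]",
+/// the sample `Paris is the <mask> of France.` would be served as
+/// `Paris is the [MASK] of France.`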
+
+const MAPPING_EN: PerLanguageMapping = new Map([
+	["text-classification", [`I like you. I love you`]],
+	[
+		"token-classification",
+		[
+			`My name is Wolfgang and I live in Berlin`,
+			`My name is Sarah and I live in London`,
+			`My name is Clara and I live in Berkeley, California.`,
+		],
+	],
+	[
+		"table-question-answering",
+		[
+			{
+				text: `How many stars does the transformers repository have?`,
+				table: {
+					Repository: ["Transformers", "Datasets", "Tokenizers"],
+					Stars: [36542, 4512, 3934],
+					Contributors: [651, 77, 34],
+					"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
+				},
+			},
+		],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `Where do I live?`,
+				context: `My name is Wolfgang and I live in Berlin`,
+			},
+			{
+				text: `Where do I live?`,
+				context: `My name is Sarah and I live in London`,
+			},
+			{
+				text: `What's my name?`,
+				context: `My name is Clara and I live in Berkeley.`,
+			},
+			{
+				text: `Which name is also used to describe the Amazon rainforest in English?`,
+				context: `The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.`,
+			},
+		],
+	],
+	[
+		"zero-shot-classification",
+		[
+			{
+				text: "I have a problem with my iphone that needs to be resolved asap!!",
+				candidate_labels: "urgent, not urgent, phone, tablet, computer",
+				multi_class: true,
+			},
+			{
+				text: "Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.",
+				candidate_labels: "mobile, website, billing, account access",
+				multi_class: false,
+			},
+			{
+				text: "A new model offers an explanation for how the Galilean satellites formed around the solar system’s largest world. Konstantin Batygin did not set out to solve one of the solar system’s most puzzling mysteries when he went for a run up a hill in Nice, France. Dr. Batygin, a Caltech researcher, best known for his contributions to the search for the solar system’s missing “Planet Nine,” spotted a beer bottle. At a steep, 20 degree grade, he wondered why it wasn’t rolling down the hill. He realized there was a breeze at his back holding the bottle in place. Then he had a thought that would only pop into the mind of a theoretical astrophysicist: “Oh! This is how Europa formed.” Europa is one of Jupiter’s four large Galilean moons. And in a paper published Monday in the Astrophysical Journal, Dr. Batygin and a co-author, Alessandro Morbidelli, a planetary scientist at the Côte d’Azur Observatory in France, present a theory explaining how some moons form around gas giants like Jupiter and Saturn, suggesting that millimeter-sized grains of hail produced during the solar system’s formation became trapped around these massive worlds, taking shape one at a time into the potentially habitable moons we know today.",
+				candidate_labels: "space & cosmos, scientific discovery, microbiology, robots, archeology",
+				multi_class: true,
+			},
+		],
+	],
+	["translation", [`My name is Wolfgang and I live in Berlin`, `My name is Sarah and I live in London`]],
+	[
+		"summarization",
+		[
+			`The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.`,
+		],
+	],
+	[
+		"conversational",
+		[
+			`Hey my name is Julien! How are you?`,
+			`Hey my name is Thomas! How are you?`,
+			`Hey my name is Mariama! How are you?`,
+			`Hey my name is Clara! How are you?`,
+			`Hey my name is Julien! How are you?`,
+			`Hi.`,
+		],
+	],
+	[
+		"text-generation",
+		[
+			`My name is Julien and I like to`,
+			`My name is Thomas and my main`,
+			`My name is Mariama, my favorite`,
+			`My name is Clara and I am`,
+			`My name is Lewis and I like to`,
+			`My name is Merve and my favorite`,
+			`My name is Teven and I am`,
+			`Once upon a time,`,
+		],
+	],
+	["fill-mask", [`Paris is the <mask> of France.`, `The goal of life is <mask>.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "That is a happy person",
+				sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
+			},
+		],
+	],
+]);
+
+const MAPPING_ZH: PerLanguageMapping = new Map([
+	["text-classification", [`我喜欢你。 我爱你`]],
+	["token-classification", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`, `我叫克拉拉,我住在加州伯克利。`]],
+	[
+		"question-answering",
+		[
+			{
+				text: `我住在哪里?`,
+				context: `我叫沃尔夫冈,我住在柏林。`,
+			},
+			{
+				text: `我住在哪里?`,
+				context: `我叫萨拉,我住在伦敦。`,
+			},
+			{
+				text: `我的名字是什么?`,
+				context: `我叫克拉拉,我住在伯克利。`,
+			},
+		],
+	],
+	["translation", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`]],
+	[
+		"zero-shot-classification",
+		[
+			{
+				text: "房间干净明亮,非常不错",
+				candidate_labels: "这是一条差评, 这是一条好评",
+			},
+		],
+	],
+	[
+		"summarization",
+		[
+			`该塔高324米(1063英尺),与一幢81层的建筑物一样高,是巴黎最高的建筑物。 它的底座是方形的,每边长125米(410英尺)。 在建造过程中,艾菲尔铁塔超过了华盛顿纪念碑,成为世界上最高的人造结构,它保持了41年的头衔,直到1930年纽约市的克莱斯勒大楼竣工。这是第一个到达300米高度的结构。 由于1957年在塔顶增加了广播天线,因此它现在比克莱斯勒大厦高5.2米(17英尺)。 除发射器外,艾菲尔铁塔是法国第二高的独立式建筑,仅次于米劳高架桥。`,
+		],
+	],
+	[
+		"text-generation",
+		[`我叫朱利安,我喜欢`, `我叫托马斯,我的主要`, `我叫玛丽亚,我最喜欢的`, `我叫克拉拉,我是`, `从前,`],
+	],
+	["fill-mask", [`巴黎是<mask>国的首都。`, `生活的真谛是<mask>。`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "那是 個快樂的人",
+				sentences: ["那是 條快樂的狗", "那是 個非常幸福的人", "今天是晴天"],
+			},
+		],
+	],
+]);
+
+const MAPPING_FR: PerLanguageMapping = new Map([
+	["text-classification", [`Je t'apprécie beaucoup. Je t'aime.`]],
+	["token-classification", [`Mon nom est Wolfgang et je vis à Berlin`]],
+	[
+		"question-answering",
+		[
+			{
+				text: `Où est-ce que je vis?`,
+				context: `Mon nom est Wolfgang et je vis à Berlin`,
+			},
+		],
+	],
+	["translation", [`Mon nom est Wolfgang et je vis à Berlin`]],
+	[
+		"summarization",
+		[
+			`La tour fait 324 mètres (1,063 pieds) de haut, environ la même hauteur qu'un immeuble de 81 étages, et est la plus haute structure de Paris. Sa base est carrée, mesurant 125 mètres (410 pieds) sur chaque côté. Durant sa construction, la tour Eiffel surpassa le Washington Monument pour devenir la plus haute structure construite par l'homme dans le monde, un titre qu'elle conserva pendant 41 ans jusqu'à l'achèvement du Chrysler Building à New-York City en 1930. Ce fut la première structure à atteindre une hauteur de 300 mètres. Avec l'ajout d'une antenne de radiodiffusion au sommet de la tour Eiffel en 1957, celle-ci redevint plus haute que le Chrysler Building de 5,2 mètres (17 pieds). En excluant les transmetteurs, elle est la seconde plus haute stucture autoportante de France après le viaduc de Millau.`,
+		],
+	],
+	["text-generation", [`Mon nom est Julien et j'aime`, `Mon nom est Thomas et mon principal`, `Il était une fois`]],
+	["fill-mask", [`Paris est la <mask> de la France.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "C'est une personne heureuse",
+				sentences: [
+					"C'est un chien heureux",
+					"C'est une personne très heureuse",
+					"Aujourd'hui est une journée ensoleillée",
+				],
+			},
+		],
+	],
+]);
+
+const MAPPING_ES: PerLanguageMapping = new Map([
+	["text-classification", [`Te quiero. Te amo.`]],
+	["token-classification", [`Me llamo Wolfgang y vivo en Berlin`]],
+	[
+		"question-answering",
+		[
+			{
+				text: `¿Dónde vivo?`,
+				context: `Me llamo Wolfgang y vivo en Berlin`,
+			},
+			{
+				text: `¿Quién inventó el submarino?`,
+				context: `Isaac Peral fue un murciano que inventó el submarino`,
+			},
+			{
+				text: `¿Cuántas personas hablan español?`,
+				context: `El español es el segundo idioma más hablado del mundo con más de 442 millones de hablantes`,
+			},
+		],
+	],
+	[
+		"translation",
+		[
+			`Me llamo Wolfgang y vivo en Berlin`,
+			`Los ingredientes de una tortilla de patatas son: huevos, patatas y cebolla`,
+		],
+	],
+	[
+		"summarization",
+		[
+			`La torre tiene 324 metros (1.063 pies) de altura, aproximadamente la misma altura que un edificio de 81 pisos y la estructura más alta de París. Su base es cuadrada, mide 125 metros (410 pies) a cada lado. Durante su construcción, la Torre Eiffel superó al Washington Monument para convertirse en la estructura artificial más alta del mundo, un título que mantuvo durante 41 años hasta que el Chrysler Building en la ciudad de Nueva York se terminó en 1930. Fue la primera estructura en llegar Una altura de 300 metros. Debido a la adición de una antena de transmisión en la parte superior de la torre en 1957, ahora es más alta que el Chrysler Building en 5,2 metros (17 pies). Excluyendo los transmisores, la Torre Eiffel es la segunda estructura independiente más alta de Francia después del Viaducto de Millau.`,
+		],
+	],
+	[
+		"text-generation",
+		[
+			`Me llamo Julien y me gusta`,
+			`Me llamo Thomas y mi principal`,
+			`Me llamo Manuel y trabajo en`,
+			`Érase una vez,`,
+			`Si tú me dices ven, `,
+		],
+	],
+	["fill-mask", [`Mi nombre es <mask> y vivo en Nueva York.`, `El español es un idioma muy <mask> en el mundo.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "Esa es una persona feliz",
+				sentences: ["Ese es un perro feliz", "Esa es una persona muy feliz", "Hoy es un día soleado"],
+			},
+		],
+	],
+]);
+
+const MAPPING_RU: PerLanguageMapping = new Map([
+	["text-classification", [`Ты мне нравишься. Я тебя люблю`]],
+	["token-classification", [`Меня зовут Вольфганг и я живу в Берлине`]],
+	[
+		"question-answering",
+		[
+			{
+				text: `Где живу?`,
+				context: `Меня зовут Вольфганг и я живу в Берлине`,
+			},
+		],
+	],
+	["translation", [`Меня зовут Вольфганг и я живу в Берлине`]],
+	[
+		"summarization",
+		[
+			`Высота башни составляет 324 метра (1063 фута), примерно такая же высота, как у 81-этажного здания, и самое высокое сооружение в Париже. Его основание квадратно, размером 125 метров (410 футов) с любой стороны. Во время строительства Эйфелева башня превзошла монумент Вашингтона, став самым высоким искусственным сооружением в мире, и этот титул она удерживала в течение 41 года до завершения строительство здания Крайслер в Нью-Йорке в 1930 году. Это первое сооружение которое достигло высоты 300 метров. Из-за добавления вещательной антенны на вершине башни в 1957 году она сейчас выше здания Крайслер на 5,2 метра (17 футов). За исключением передатчиков, Эйфелева башня является второй самой высокой отдельно стоящей структурой во Франции после виадука Мийо.`,
+		],
+	],
+	["text-generation", [`Меня зовут Жюльен и`, `Меня зовут Томас и мой основной`, `Однажды`]],
+	["fill-mask", [`Меня зовут <mask> и я инженер живущий в Нью-Йорке.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "Это счастливый человек",
+				sentences: ["Это счастливая собака", "Это очень счастливый человек", "Сегодня солнечный день"],
+			},
+		],
+	],
+]);
+
+const MAPPING_UK: PerLanguageMapping = new Map([
+	["translation", [`Мене звати Вольфґанґ і я живу в Берліні.`]],
+	["fill-mask", [`Мене звати <mask>.`]],
+]);
+
+const MAPPING_IT: PerLanguageMapping = new Map([
+	["text-classification", [`Mi piaci. Ti amo`]],
+	[
+		"token-classification",
+		[
+			`Mi chiamo Wolfgang e vivo a Berlino`,
+			`Mi chiamo Sarah e vivo a Londra`,
+			`Mi chiamo Clara e vivo a Berkeley in California.`,
+		],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `Dove vivo?`,
+				context: `Mi chiamo Wolfgang e vivo a Berlino`,
+			},
+			{
+				text: `Dove vivo?`,
+				context: `Mi chiamo Sarah e vivo a Londra`,
+			},
+			{
+				text: `Come mi chiamo?`,
+				context: `Mi chiamo Clara e vivo a Berkeley.`,
+			},
+		],
+	],
+	["translation", [`Mi chiamo Wolfgang e vivo a Berlino`, `Mi chiamo Sarah e vivo a Londra`]],
+	[
+		"summarization",
+		[
+			`La torre degli Asinelli è una delle cosiddette due torri di Bologna, simbolo della città, situate in piazza di porta Ravegnana, all'incrocio tra le antiche strade San Donato (ora via Zamboni), San Vitale, Maggiore e Castiglione. Eretta, secondo la tradizione, fra il 1109 e il 1119 dal nobile Gherardo Asinelli, la torre è alta 97,20 metri, pende verso ovest per 2,23 metri e presenta all'interno una scalinata composta da 498 gradini. Ancora non si può dire con certezza quando e da chi fu costruita la torre degli Asinelli. Si presume che la torre debba il proprio nome a Gherardo Asinelli, il nobile cavaliere di fazione ghibellina al quale se ne attribuisce la costruzione, iniziata secondo una consolidata tradizione l'11 ottobre 1109 e terminata dieci anni dopo, nel 1119.`,
+		],
+	],
+	[
+		"text-generation",
+		[
+			`Mi chiamo Loreto e mi piace`,
+			`Mi chiamo Thomas e il mio principale`,
+			`Mi chiamo Marianna, la mia cosa preferita`,
+			`Mi chiamo Clara e sono`,
+			`C'era una volta`,
+		],
+	],
+	["fill-mask", [`Roma è la <mask> d'Italia.`, `Lo scopo della vita è <mask>.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "Questa è una persona felice",
+				sentences: ["Questo è un cane felice", "Questa è una persona molto felice", "Oggi è una giornata di sole"],
+			},
+		],
+	],
+]);
+
+const MAPPING_FA: PerLanguageMapping = new Map([
+	[
+		"text-classification",
+		[`پروژه به موقع تحویل شد و همه چیز خوب بود.`, `سیب‌زمینی بی‌کیفیت بود.`, `قیمت و کیفیت عالی`, `خوب نبود اصلا`],
+	],
+	[
+		"token-classification",
+		[
+			`این سریال به صورت رسمی در تاریخ دهم می ۲۰۱۱ توسط شبکه فاکس برای پخش رزرو شد.`,
+			`دفتر مرکزی شرکت پارس‌مینو در شهر اراک در استان مرکزی قرار دارد.`,
+			`وی در سال ۲۰۱۳ درگذشت و مسئول خاکسپاری و اقوامش برای او مراسم یادبود گرفتند.`,
+		],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `من کجا زندگی میکنم؟`,
+				context: `نام من پژمان است و در گرگان زندگی میکنم.`,
+			},
+			{
+				text: `نامم چیست و کجا زندگی می‌کنم؟`,
+				context: `اسمم سارا است و در آفریقای جنوبی زندگی میکنم.`,
+			},
+			{
+				text: `نام من چیست؟`,
+				context: `من مریم هستم و در تبریز زندگی می‌کنم.`,
+			},
+			{
+				text: `بیشترین مساحت جنگل آمازون در کدام کشور است؟`,
+				context: [
+					"آمازون نام بزرگ‌ترین جنگل بارانی جهان است که در شمال آمریکای جنوبی قرار گرفته و بیشتر آن در خاک برزیل و پرو",
+					"جای دارد. بیش از نیمی از همه جنگل‌های بارانی باقی‌مانده در جهان در آمازون قرار دارد.",
+					"مساحت جنگل‌های آمازون ۵٫۵ میلیون کیلومتر مربع است که بین ۹ کشور تقسیم شده‌است.",
+				].join("\n"),
+			},
+		],
+	],
+	[
+		"translation",
+		[
+			"بیشتر مساحت جنگل‌های آمازون در حوضه آبریز رود آمازون و ۱۱۰۰ شاخه آن واقع شده‌است.",
+			"مردمان نَبَطی از هزاره‌های یکم و دوم پیش از میلاد در این منطقه زندگی می‌کردند.",
+		],
+	],
+	[
+		"summarization",
+		[
+			[
+				"شاهنامه اثر حکیم ابوالقاسم فردوسی توسی، حماسه‌ای منظوم، بر حسب دست نوشته‌های ",
+				"موجود دربرگیرنده نزدیک به ۵۰٬۰۰۰ بیت تا نزدیک به ۶۱٬۰۰۰ بیت و یکی از ",
+				"بزرگ‌ترین و برجسته‌ترین سروده‌های حماسی جهان است که سرایش آن دست‌آوردِ ",
+				"دست‌کم سی سال کارِ پیوستهٔ این سخن‌سرای نامدار ایرانی است. موضوع این شاهکار ادبی،",
+				" افسانه‌ها و تاریخ ایران از آغاز تا حملهٔ عرب‌ها به ایران در سدهٔ هفتم میلادی است",
+				"  (شاهنامه از سه بخش اسطوره، پهلوانی و تاریخی تشکیل شده‌است) که در چهار",
+				"   دودمان پادشاهیِ پیشدادیان، کیانیان، اشکانیان و ساسانیان گنجانده می‌شود.",
+				"    شاهنامه بر وزن «فَعولُن فعولن فعولن فَعَلْ»، در بحرِ مُتَقارِبِ مثمَّنِ محذوف نگاشته شده‌است.",
+				"هنگامی که زبان دانش و ادبیات در ایران زبان عربی بود، فردوسی، با سرودن شاهنامه",
+				" با ویژگی‌های هدف‌مندی که داشت، زبان پارسی را زنده و پایدار کرد. یکی از ",
+				" بن‌مایه‌های مهمی که فردوسی برای سرودن شاهنامه از آن استفاده کرد،",
+				"  شاهنامهٔ ابومنصوری بود. شاهنامه نفوذ بسیاری در جهت‌گیری ",
+				"  فرهنگ فارسی و نیز بازتاب‌های شکوه‌مندی در ادبیات جهان داشته‌است و شاعران ",
+				"  بزرگی مانند گوته و ویکتور هوگو از آن به نیکی یاد کرده‌اند.",
+			].join("\n"),
+		],
+	],
+	["text-generation", ["اسم من نازنین است و من", "روزی روزگاری"]],
+	[
+		"fill-mask",
+		[
+			`زندگی یک سوال است و این که چگونه <mask> کنیم پاسخ این سوال!`,
+			`زندگی از مرگ پرسید: چرا همه من را <mask> دارند اما از تو متنفرند؟`,
+		],
+	],
+]);
+
+const MAPPING_AR: PerLanguageMapping = new Map([
+	["text-classification", [`أحبك. أهواك`]],
+	[
+		"token-classification",
+		[`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`, `إسمي سامي وأسكن في القدس في فلسطين.`],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `أين أسكن؟`,
+				context: `إسمي محمد وأسكن في بيروت`,
+			},
+			{
+				text: `أين أسكن؟`,
+				context: `إسمي ساره وأسكن في لندن`,
+			},
+			{
+				text: `ما اسمي؟`,
+				context: `اسمي سعيد وأسكن في حيفا.`,
+			},
+			{
+				text: `ما لقب خالد بن الوليد بالعربية؟`,
+				context: `خالد بن الوليد من أبطال وقادة الفتح الإسلامي وقد تحدثت عنه اللغات الإنجليزية والفرنسية والإسبانية ولقب بسيف الله المسلول.`,
+			},
+		],
+	],
+	["translation", [`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`]],
+	[
+		"summarization",
+		[
+			`تقع الأهرامات في الجيزة قرب القاهرة في مصر وقد بنيت منذ عدة قرون، وقيل إنها كانت قبورا للفراعنة وتم بناؤها بعملية هندسية رائعة واستقدمت حجارتها من جبل المقطم وتم نقلها بالسفن أو على الرمل، وما تزال شامخة ويقصدها السياح من كافة أرجاء المعمورة.`,
+		],
+	],
+	[
+		"text-generation",
+		[
+			`إسمي محمد وأحب أن`,
+			`دع المكارم لا ترحل لبغيتها - واقعد فإنك أنت الطاعم الكاسي.`,
+			`لماذا نحن هنا؟`,
+			`القدس مدينة تاريخية، بناها الكنعانيون في`,
+			`كان يا ما كان في قديم الزمان`,
+		],
+	],
+	["fill-mask", [`باريس <mask> فرنسا.`, `فلسفة الحياة هي <mask>.`]],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "هذا شخص سعيد",
+				sentences: ["هذا كلب سعيد", "هذا شخص سعيد جدا", "اليوم هو يوم مشمس"],
+			},
+		],
+	],
+]);
+
+const MAPPING_BN: PerLanguageMapping = new Map([
+	["text-classification", [`বাঙালির ঘরে ঘরে আজ নবান্ন উৎসব।`]],
+	[
+		"token-classification",
+		[`আমার নাম জাহিদ এবং আমি ঢাকায় বাস করি।`, `তিনি গুগলে চাকরী করেন।`, `আমার নাম সুস্মিতা এবং আমি কলকাতায় বাস করি।`],
+	],
+	["translation", [`আমার নাম জাহিদ, আমি রংপুরে বাস করি।`, `আপনি কী আজকে বাসায় আসবেন?`]],
+	[
+		"summarization",
+		[
+			`‘ইকোনমিস্ট’ লিখেছে, অ্যান্টিবডির চার মাস স্থায়ী হওয়ার খবরটি দুই কারণে আনন্দের। অ্যান্টিবডি যত দিন পর্যন্ত শরীরে টিকবে, তত দিন সংক্রমণ থেকে সুরক্ষিত থাকা সম্ভব। অর্থাৎ, এমন এক টিকার প্রয়োজন হবে, যা অ্যান্টিবডির উত্পাদনকে প্ররোচিত করতে পারে এবং দীর্ঘস্থায়ী সুরক্ষা দিতে পারে। এগুলো খুঁজে বের করাও সহজ। এটি আভাস দেয়, ব্যাপক হারে অ্যান্টিবডি শনাক্তকরণ ফলাফল মোটামুটি নির্ভুল হওয়া উচিত। দ্বিতীয় আরেকটি গবেষণার নেতৃত্ব দিয়েছেন যুক্তরাজ্যের মেডিকেল রিসার্চ কাউন্সিলের (এমআরসি) ইমিউনোলজিস্ট তাও দং। তিনি টি-সেল শনাক্তকরণে কাজ করেছেন। টি-সেল শনাক্তকরণের প্রক্রিয়া অবশ্য অ্যান্টিবডির মতো এত আলোচিত নয়। তবে সংক্রমণের বিরুদ্ধে লড়াই এবং দীর্ঘমেয়াদি সুরক্ষায় সমান গুরুত্বপূর্ণ ভূমিকা পালন করে। গবেষণাসংক্রান্ত নিবন্ধ প্রকাশিত হয়েছে ‘নেচার ইমিউনোলজি’ সাময়িকীতে। তাঁরা বলছেন, গবেষণার ক্ষেত্রে কোভিড-১৯ মৃদু সংক্রমণের শিকার ২৮ ব্যক্তির রক্তের নমুনা, ১৪ জন গুরুতর অসুস্থ ও ১৬ জন সুস্থ ব্যক্তির রক্তের নমুনা পরীক্ষা করেছেন। গবেষণা নিবন্ধে বলা হয়, সংক্রমিত ব্যক্তিদের ক্ষেত্রে টি-সেলের তীব্র প্রতিক্রিয়া তাঁরা দেখেছেন। এ ক্ষেত্রে মৃদু ও গুরুতর অসুস্থ ব্যক্তিদের ক্ষেত্রে প্রতিক্রিয়ার ভিন্নতা পাওয়া গেছে।`,
+		],
+	],
+	["text-generation", [`আমি রতন এবং আমি`, `তুমি যদি চাও তবে`, `মিথিলা আজকে বড্ড`]],
+	["fill-mask", [`আমি বাংলায় <mask> গাই।`, `আমি <mask> খুব ভালোবাসি। `]],
+	[
+		"question-answering",
+		[
+			{
+				text: `প্রথম এশিয়া কাপ ক্রিকেট টুর্নামেন্ট কোথায় অনুষ্ঠিত হয় ?`,
+				context: `প্রথম টুর্নামেন্ট অনুষ্ঠিত হয় ১৯৮৪ সালে সংযুক্ত আরব আমিরাত এর শারজাহ তে যেখানে কাউন্সিলের মূল অফিস ছিল (১৯৯৫ পর্যন্ত)। ভারত শ্রীলঙ্কার সাথে আন্তরিকতাহীন ক্রিকেট সম্পর্কের কারণে ১৯৮৬ সালের টুর্নামেন্ট বর্জন করে। ১৯৯৩ সালে ভারত ও পাকিস্তান এর মধ্যে রাজনৈতিক অস্থিরতার কারণে এটি বাতিল হয়ে যায়। শ্রীলঙ্কা এশিয়া কাপ শুরু থেকে অংশ গ্রহণ করে আসছে। আন্তর্জাতিক ক্রিকেট কাউন্সিল নিয়ম করে দিয়েছে যে এশিয়া কাপের সকল খেলা অনুষ্ঠিত হবে অফিসিয়াল একদিনের আন্তর্জাতিক ক্রিকেট হিসেবে। এসিসি ঘোষনা অনুযায়ী প্রতি দুই বছর পর পর টুর্নামেন্ট অনুষ্ঠিত হয় ২০০৮ সাল থেকে।`,
+			},
+			{
+				text: `ভারতীয় বাঙালি কথাসাহিত্যিক মহাশ্বেতা দেবীর মৃত্যু কবে হয় ?`,
+				context: `২০১৬ সালের ২৩ জুলাই হৃদরোগে আক্রান্ত হয়ে মহাশ্বেতা দেবী কলকাতার বেল ভিউ ক্লিনিকে ভর্তি হন। সেই বছরই ২৮ জুলাই একাধিক অঙ্গ বিকল হয়ে তাঁর মৃত্যু ঘটে। তিনি মধুমেহ, সেপ্টিসেমিয়া ও মূত্র সংক্রমণ রোগেও ভুগছিলেন।`,
+			},
+			{
+				text: `মাস্টারদা সূর্যকুমার সেনের বাবার নাম কী ছিল ?`,
+				context: `সূর্য সেন ১৮৯৪ সালের ২২ মার্চ চট্টগ্রামের রাউজান থানার নোয়াপাড়ায় অর্থনৈতিক ভাবে অস্বচ্ছল পরিবারে জন্মগ্রহণ করেন। তাঁর পিতার নাম রাজমনি সেন এবং মাতার নাম শশী বালা সেন। রাজমনি সেনের দুই ছেলে আর চার মেয়ে। সূর্য সেন তাঁদের পরিবারের চতুর্থ সন্তান। দুই ছেলের নাম সূর্য ও কমল। চার মেয়ের নাম বরদাসুন্দরী, সাবিত্রী, ভানুমতী ও প্রমিলা। শৈশবে পিতা মাতাকে হারানো সূর্য সেন কাকা গৌরমনি সেনের কাছে মানুষ হয়েছেন। সূর্য সেন ছেলেবেলা থেকেই খুব মনোযোগী ভাল ছাত্র ছিলেন এবং ধর্মভাবাপন্ন গম্ভীর প্রকৃতির ছিলেন।`,
+			},
+		],
+	],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "সে একজন সুখী ব্যক্তি",
+				sentences: ["সে হ্যাপি কুকুর", "সে খুব সুখী মানুষ", "আজ একটি রৌদ্রোজ্জ্বল দিন"],
+			},
+		],
+	],
+]);
+
+const MAPPING_MN: PerLanguageMapping = new Map([
+	["text-classification", [`Би чамд хайртай`]],
+	[
+		"token-classification",
+		[
+			`Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`,
+			`Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`,
+			`Манай улс таван хошуу малтай.`,
+		],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `Та хаана амьдардаг вэ?`,
+				context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`,
+			},
+			{
+				text: `Таныг хэн гэдэг вэ?`,
+				context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`,
+			},
+			{
+				text: `Миний нэрийг хэн гэдэг вэ?`,
+				context: `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`,
+			},
+		],
+	],
+	["translation", [`Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`]],
+	[
+		"summarization",
+		[
+			`Монгол Улс (1992 оноос хойш) — дорно болон төв Азид оршдог бүрэн эрхт улс. Хойд талаараа Орос, бусад талаараа Хятад улстай хиллэдэг далайд гарцгүй орон. Нийслэл — Улаанбаатар хот. Алтайн нуруунаас Хянган, Соёноос Говь хүрсэн 1 сая 566 мянган км2 уудам нутагтай, дэлхийд нутаг дэвсгэрийн хэмжээгээр 19-рт жагсдаг. 2015 оны эхэнд Монгол Улсын хүн ам 3 сая хүрсэн (135-р олон). Үндсэндээ монгол үндэстэн (95 хувь), мөн хасаг, тува хүн байна. 16-р зуунаас хойш буддын шашин, 20-р зуунаас шашингүй байдал дэлгэрсэн ба албан хэрэгт монгол хэлээр харилцана.`,
+		],
+	],
+	[
+		"text-generation",
+		[`Намайг Дорж гэдэг. Би`, `Хамгийн сайн дуучин бол`, `Миний дуртай хамтлаг бол`, `Эрт урьдын цагт`],
+	],
+	["fill-mask", [`Монгол улсын <mask> Улаанбаатар хотоос ярьж байна.`, `Миний амьдралын зорилго бол <mask>.`]],
+	[
+		"automatic-speech-recognition",
+		[
+			{
+				label: `Common Voice Train Example`,
+				src: `https://cdn-media.huggingface.co/common_voice/train/common_voice_mn_18577472.wav`,
+			},
+			{
+				label: `Common Voice Test Example`,
+				src: `https://cdn-media.huggingface.co/common_voice/test/common_voice_mn_18577346.wav`,
+			},
+		],
+	],
+	[
+		"text-to-speech",
+		[
+			`Би Монгол улсын иргэн.`,
+			`Энэхүү жишээ нь цаанаа ямар ч утга агуулаагүй болно`,
+			`Сар шинэдээ сайхан шинэлэж байна уу?`,
+		],
+	],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "Энэ бол аз жаргалтай хүн юм",
+				sentences: ["Энэ бол аз жаргалтай нохой юм", "Энэ бол маш их аз жаргалтай хүн юм", "Өнөөдөр нарлаг өдөр байна"],
+			},
+		],
+	],
+]);
+
+const MAPPING_SI: PerLanguageMapping = new Map([
+	["translation", [`සිංහල ඉතා අලංකාර භාෂාවකි.`, `මෙම තාක්ෂණය භාවිතා කරන ඔබට ස්තූතියි.`]],
+	["fill-mask", [`මම ගෙදර <mask>.`, `<mask> ඉගෙනීමට ගියාය.`]],
+]);
+
+const MAPPING_DE: PerLanguageMapping = new Map([
+	[
+		"question-answering",
+		[
+			{
+				text: `Wo wohne ich?`,
+				context: `Mein Name ist Wolfgang und ich lebe in Berlin`,
+			},
+			{
+				text: `Welcher Name wird auch verwendet, um den Amazonas-Regenwald auf Englisch zu beschreiben?`,
+				context: `Der Amazonas-Regenwald, auf Englisch auch als Amazonien oder Amazonas-Dschungel bekannt, ist ein feuchter Laubwald, der den größten Teil des Amazonas-Beckens Südamerikas bedeckt. Dieses Becken umfasst 7.000.000 Quadratkilometer (2.700.000 Quadratmeilen), von denen 5.500.000 Quadratkilometer (2.100.000 Quadratmeilen) vom Regenwald bedeckt sind. Diese Region umfasst Gebiete von neun Nationen. Der größte Teil des Waldes befindet sich in Brasilien mit 60% des Regenwaldes, gefolgt von Peru mit 13%, Kolumbien mit 10% und geringen Mengen in Venezuela, Ecuador, Bolivien, Guyana, Suriname und Französisch-Guayana. Staaten oder Abteilungen in vier Nationen enthalten "Amazonas" in ihren Namen. Der Amazonas repräsentiert mehr als die Hälfte der verbleibenden Regenwälder des Planeten und umfasst den größten und artenreichsten tropischen Regenwald der Welt mit geschätzten 390 Milliarden Einzelbäumen, die in 16.000 Arten unterteilt sind.`,
+			},
+		],
+	],
+	[
+		"sentence-similarity",
+		[
+			{
+				source_sentence: "Das ist eine glückliche Person",
+				sentences: [
+					"Das ist ein glücklicher Hund",
+					"Das ist eine sehr glückliche Person",
+					"Heute ist ein sonniger Tag",
+				],
+			},
+		],
+	],
+]);
+
+const MAPPING_DV: PerLanguageMapping = new Map([
+	["text-classification", [`އަހަރެން ގަޔާވޭ. އަހަރެން ލޯބިވޭ`]],
+	[
+		"token-classification",
+		[
+			`އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`,
+			`އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`,
+			`އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫ، އައްޑޫގަ`,
+		],
+	],
+	[
+		"question-answering",
+		[
+			{
+				text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`,
+				context: `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`,
+			},
+			{
+				text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`,
+				context: `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`,
+			},
+			{
+				text: `އަހަރެންގެ ނަމަކީ ކޮބާ؟`,
+				context: `އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫގަ`,
+			},
+			{
+				text: `އެމޭޒަން ރެއިންފޮރެސްޓް ސިފަކޮށްދިނުމަށް އިނގިރޭސި ބަހުން ބޭނުންކުރާނީ ކޮންނަމެއް؟`,
+				context: `އެމޭޒަން ރެއިންފޮރެސްޓް (ޕޯޗުޖީޒް: ފްލޮރެސްޓާ އެމަސޮނިކާ ނުވަތަ އެމަސޮނިއާ؛ ސްޕެނިޝް: ސެލްވާ އެމަސޮނިކާ, އެމަސޮނިއާ ނޫނީ އާންމުކޮށް އެމަޒޯނިއާ؛ ފްރެންޗް: ފޮރޭ އެމެޒޮނިއެން؛ ޑަޗް: އެމެޒޯންރޭގެވައުޑް)، އިގިރޭސި ބަހުން ބުނާ އެމެޒޯނިއާ ނުވަތަ ދަ އެމޭޒަން ޖަންގަލް އަކީ, ސައުތު އެމެރިކާގެ އެމޭޒަން ބޭސިން ސަރަހައްދުގެ ބޮޑުބައެއްގައި ހިމެނޭ މޮއިސްޓް ބޮރޯޑްލީފް ފޮރެސްޓެއެކެވެ. އެމޭޒަން ބޭސިން ސަރަހައްދުގެ ބޮޑު މިނަކީ 7 މިލިއަން އަކަ ކިލޯމީޓަރ (2.7 މިލިއަން އަކަ މައިލް(. މީގެ ތެރެއިން 5.5 މިލިއަން އަކަ ކިލޯމީޓަރ (2.1 މިލިއަން އަކަ މައިލް) އަކީ މި ފޮރެސްޓެވެ. މި ސަރަހައްދުގައި 9 ގައުމަކަށް ނިސްބަތްވާ ޓެރިޓަރީ ހިމެނެއެވެ.  60% އާއިއެކެ އެންމެ ބޮޑު ބައެއް ނިސްބަތްވަނީ ބްރެޒިލްއަށެވެ. އޭގެ ފަހުތުން 13% އާއެކު ޕެރޫ އާއި 10% އާއެކު ކޮލަމްބިއާ އަދި ކުޑަ ބައެއް ހިމެނޭ ގޮތުން ވެނެޒުއެލާ, އެކްއަޑޯ, ބޮލިވިއާ, ގުޔާނާ, ސުރިނާމް އަދި ފްރެންޗް ގްއާނާ އަށް ވެސް ނިސްބަތްވެއެވެ. މީގެ ތެރެއިން 4 ގައުމެއްގައި "އެމެޒޮނާސް" ހިމަނައިގެން ސްޓޭޓް ނުވަތަ ޑިޕާޓްމަންޓް އަކަށް ނަންދީފައިވެއެވެ. މުޅި ދުނިޔޭގައި ބާކީ ހުރި ރެއިންފޮރެސްޓްގެ ތެރެއިން ދެބައިކުޅަ އެއްބަޔަށްވުރެބޮޑުވަރެއް އެމޭޒޮން ރެއިންފޮރެސްޓް ހިއްސާކުރެއެވެ. މިއީ މުޅި ދުނިޔެއިން އެންމޮ ބޮޑު އަދި އެންމެ ބައޮޑައިވަރސް ރެއިންފޮރެސްޓް ޓްރެކްޓެވެ. ލަފާކުރެވޭ ގޮތުން 16 ހާސް ސްޕީޝީސްއަށް ބެހިގެންވާ 390 މިލިއަން ވައްތަރުގެ ގަސް މިތާގައި ހިމެނެއެވެ`,
+			},
+		],
+	],
+	[
+		"translation",
+		[
+			`އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`,
+			`އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`,
+		],
+	],
+	[
+		"summarization",
+		[
+			`ޓަވަރުގެ އުސްމިނަކީ 324 މީޓަރު، އެއީ ގާތްގަނޑަކަށް 81 ބުރީގެ އިމާރާތަކާއި އެއްވަރެވެ. އެއީ ޕެރިސްގައި ހުރި އެންމެ އުސް އިމާރާތެވެ. އޭގެ ހަތަރެސްކަނަށް ހުރި ބުޑުގެ ދިގުމިނަކީ ކޮންމެ ފަރާތަކުން 125 މީޓަރެވެ. (410 ފޫޓު) އައިފިލް ޓަވަރު ބިނާކުރި އިރު، ވޮޝިންގްޓަން މޮނިއުމެންޓްގެ އުސްމިން ފަހަނައަޅާ ގޮސް، ދުނިޔޭގައި މީހުން އުފެއްދި ތަންތަނުގެ ތެރެއިން އެންމެ އުސް ތަނުގެ ލަގަބު ލިބުނެވެ. އަދި 1930 ގައި ނިއު ޔޯކްގެ ކްރައިސްލަރ ބިލްޑިންގް ބިނާކުރުމާއި ހަމައަށް 41 އަހަރު ވަންދެން މިލަގަބު ހިފެހެއްޓިއެވެ. މިއީ 300 މީޓަރަށް ވުރެ އުސްކޮށް އިމާރާތްކުރެވުނު ފުރަތަމަ ތަނެވެ. 1957 ގައި ޓަވަރުގެ އެންމެ މަތީގައި ހަރުކުރެވުނު ބްރޯޑްކާސްޓިންގ އޭރިއަލްގެ ސަބަބުން މިހާރު މި ޓަވަރު ކްރައިސްލަރ ބިލްޑިންގއަށް ވުރެ 5.2 މީޓަރ (17 ފޫޓު) އުހެވެ. މި ޓްރާންސްމިޓަރު ނުލާ، އައިފިލް ޓަވަރަކީ، މިލާއު ވިއާޑަކްޓަށް ފަހު ފްރާންސްގައި ހުރި 2 ވަނައަށް އެންމެ އުސް ފްރީސްޓޭންޑިންގ އިމާރާތެވެ`,
+		],
+	],
+	[
+		"text-generation",
+		[
+			`އަހަރެންގެ ނަމަކީ ޔޫސުފް އަދި އަހަރެންގެ މައިގަނޑު`,
+			`އަހަރެންގެ ނަމަކީ މަރިއަމް، އަހަރެން އެންމެ ގަޔާވާ`,
+			`އަހަރެންގެ ނަމަކީ ފާތުމަތު އަދި އަހަރެން`,
+			`،އެއް ޒަމާނެއްގައި`,
+		],
+	],
+	["fill-mask", [`.<mask> މާލެ އަކީ ދިވެހިރާއްޖޭގެ`, `ގަރުދިޔައަކީ ދިވެހިންގެ މެދުގައި <mask> ކެއުމެއް.`]],
+]);
+
+export const MAPPING_DEFAULT_WIDGET = new Map<LanguageCode, PerLanguageMapping>([
+	["en", MAPPING_EN],
+	["zh", MAPPING_ZH],
+	["fr", MAPPING_FR],
+	["es", MAPPING_ES],
+	["ru", MAPPING_RU],
+	["uk", MAPPING_UK],
+	["it", MAPPING_IT],
+	["fa", MAPPING_FA],
+	["ar", MAPPING_AR],
+	["bn", MAPPING_BN],
+	["mn", MAPPING_MN],
+	["si", MAPPING_SI],
+	["de", MAPPING_DE],
+	["dv", MAPPING_DV],
+]);
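+
+// Usage sketch (the fallback to English is an assumption about callers, not enforced here):
+// const perLanguage = MAPPING_DEFAULT_WIDGET.get("it") ?? MAPPING_DEFAULT_WIDGET.get("en");
+// const inputs = perLanguage?.get("fill-mask");
+// // → [`Roma è la <mask> d'Italia.`, `Lo scopo della vita è <mask>.`]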
diff --git a/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts b/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts
new file mode 100644
index 0000000000000000000000000000000000000000..06f3fd4037da41d531e45a75f8c5c9b42654a6a2
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts
@@ -0,0 +1,14 @@
+export enum InferenceDisplayability {
+	/**
+	 * Yes
+	 */
+	Yes = "Yes",
+	/**
+	 * And then, all the possible reasons why it's not displayed:
+	 */
+	ExplicitOptOut = "ExplicitOptOut",
+	CustomCode = "CustomCode",
+	LibraryNotDetected = "LibraryNotDetected",
+	PipelineNotDetected = "PipelineNotDetected",
+	PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported",
+}
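+
+// The `inference` field on `ModelData` (see ./Types.js) carries one of these values
+// and determines whether the inference widget is displayed for a given model.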
diff --git a/packages/widgets/src/lib/interfaces/Libraries.ts b/packages/widgets/src/lib/interfaces/Libraries.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d723b627c05c4ccc79a489d349eb9e14015ef6ff
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/Libraries.ts
@@ -0,0 +1,766 @@
+import type { ModelLibraryKey } from "@huggingface/tasks";
+/* eslint-disable @typescript-eslint/naming-convention */
+import type { ModelData } from "./Types.js";
+
+/**
+ * Elements configurable by a model library.
+ */
+export interface LibraryUiElement {
+	/**
+	 * Name displayed on the main
+	 * call-to-action button on the model page.
+	 */
+	btnLabel: string;
+	/**
+	 * Repo name
+	 */
+	repoName: string;
+	/**
+	 * URL to library's repo
+	 */
+	repoUrl: string;
+	/**
+	 * URL to library's docs
+	 */
+	docsUrl?: string;
+	/**
+	 * Code snippet displayed on model page
+	 */
+	snippets: (model: ModelData) => string[];
+}
+
+function nameWithoutNamespace(modelId: string): string {
+	const splitted = modelId.split("/");
+	return splitted.length === 1 ? splitted[0] : splitted[1];
+}
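+
+// e.g. nameWithoutNamespace("julien-c/wine-quality") === "wine-quality"
+//      nameWithoutNamespace("gpt2") === "gpt2"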
+
+//#region snippets
+
+const adapter_transformers = (model: ModelData) => [
+	`from transformers import ${model.config?.adapter_transformers?.model_class}
+
+model = ${model.config?.adapter_transformers?.model_class}.from_pretrained("${model.config?.adapter_transformers?.model_name}")
+model.load_adapter("${model.id}", source="hf")`,
+];
+
+const allennlpUnknown = (model: ModelData) => [
+	`import allennlp_models
+from allennlp.predictors.predictor import Predictor
+
+predictor = Predictor.from_path("hf://${model.id}")`,
+];
+
+const allennlpQuestionAnswering = (model: ModelData) => [
+	`import allennlp_models
+from allennlp.predictors.predictor import Predictor
+
+predictor = Predictor.from_path("hf://${model.id}")
+predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"}
+predictions = predictor.predict_json(predictor_input)`,
+];
+
+const allennlp = (model: ModelData) => {
+	if (model.tags?.includes("question-answering")) {
+		return allennlpQuestionAnswering(model);
+	}
+	return allennlpUnknown(model);
+};
+
+const asteroid = (model: ModelData) => [
+	`from asteroid.models import BaseModel
+
+model = BaseModel.from_pretrained("${model.id}")`,
+];
+
+function get_base_diffusers_model(model: ModelData): string {
+	return model.cardData?.base_model ?? "fill-in-base-model";
+}
+
+const bertopic = (model: ModelData) => [
+	`from bertopic import BERTopic
+
+model = BERTopic.load("${model.id}")`,
+];
+
+const diffusers_default = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+
+pipeline = DiffusionPipeline.from_pretrained("${model.id}")`,
+];
+
+const diffusers_controlnet = (model: ModelData) => [
+	`from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+controlnet = ControlNetModel.from_pretrained("${model.id}")
+pipeline = StableDiffusionControlNetPipeline.from_pretrained(
+	"${get_base_diffusers_model(model)}", controlnet=controlnet
+)`,
+];
+
+const diffusers_lora = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+
+pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+pipeline.load_lora_weights("${model.id}")`,
+];
+
+const diffusers_textual_inversion = (model: ModelData) => [
+	`from diffusers import DiffusionPipeline
+
+pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
+pipeline.load_textual_inversion("${model.id}")`,
+];
+
+const diffusers = (model: ModelData) => {
+	if (model.tags?.includes("controlnet")) {
+		return diffusers_controlnet(model);
+	} else if (model.tags?.includes("lora")) {
+		return diffusers_lora(model);
+	} else if (model.tags?.includes("textual_inversion")) {
+		return diffusers_textual_inversion(model);
+	} else {
+		return diffusers_default(model);
+	}
+};
+
+const espnetTTS = (model: ModelData) => [
+	`from espnet2.bin.tts_inference import Text2Speech
+
+model = Text2Speech.from_pretrained("${model.id}")
+
+speech, *_ = model("text to generate speech from")`,
+];
+
+const espnetASR = (model: ModelData) => [
+	`from espnet2.bin.asr_inference import Speech2Text
+
+model = Speech2Text.from_pretrained(
+  "${model.id}"
+)
+
+speech, rate = soundfile.read("speech.wav")
+text, *_ = model(speech)[0]`,
+];
+
+const espnetUnknown = () => [`# ⚠️ Unknown model type (must be text-to-speech or automatic-speech-recognition)`];
+
+const espnet = (model: ModelData) => {
+	if (model.tags?.includes("text-to-speech")) {
+		return espnetTTS(model);
+	} else if (model.tags?.includes("automatic-speech-recognition")) {
+		return espnetASR(model);
+	}
+	return espnetUnknown();
+};
+
+const fairseq = (model: ModelData) => [
+	`from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
+
+models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
+    "${model.id}"
+)`,
+];
+
+const flair = (model: ModelData) => [
+	`from flair.models import SequenceTagger
+
+tagger = SequenceTagger.load("${model.id}")`,
+];
+
+const keras = (model: ModelData) => [
+	`from huggingface_hub import from_pretrained_keras
+
+model = from_pretrained_keras("${model.id}")
+`,
+];
+
+const open_clip = (model: ModelData) => [
+	`import open_clip
+
+model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}')
+tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`,
+];
+
+const paddlenlp = (model: ModelData) => {
+	if (model.config?.architectures?.[0]) {
+		const architecture = model.config.architectures[0];
+		return [
+			[
+				`from paddlenlp.transformers import AutoTokenizer, ${architecture}`,
+				"",
+				`tokenizer = AutoTokenizer.from_pretrained("${model.id}"${
+					model.private ? ", use_auth_token=True" : ""
+				}, from_hf_hub=True)`,
+				`model = ${architecture}.from_pretrained("${model.id}"${
+					model.private ? ", use_auth_token=True" : ""
+				}, from_hf_hub=True)`,
+			].join("\n"),
+		];
+	} else {
+		return [
+			[
+				`# ⚠️ Type of model unknown`,
+				`from paddlenlp.transformers import AutoTokenizer, AutoModel`,
+				"",
+				`tokenizer = AutoTokenizer.from_pretrained("${model.id}"${
+					model.private ? ", use_auth_token=True" : ""
+				}, from_hf_hub=True)`,
+				`model = AutoModel.from_pretrained("${model.id}"${
+					model.private ? ", use_auth_token=True" : ""
+				}, from_hf_hub=True)`,
+			].join("\n"),
+		];
+	}
+};
+
+const pyannote_audio_pipeline = (model: ModelData) => [
+	`from pyannote.audio import Pipeline
+
+pipeline = Pipeline.from_pretrained("${model.id}")
+
+# inference on the whole file
+pipeline("file.wav")
+
+# inference on an excerpt
+from pyannote.core import Segment
+excerpt = Segment(start=2.0, end=5.0)
+
+from pyannote.audio import Audio
+waveform, sample_rate = Audio().crop("file.wav", excerpt)
+pipeline({"waveform": waveform, "sample_rate": sample_rate})`,
+];
+
+const pyannote_audio_model = (model: ModelData) => [
+	`from pyannote.audio import Model, Inference
+
+model = Model.from_pretrained("${model.id}")
+inference = Inference(model)
+
+# inference on the whole file
+inference("file.wav")
+
+# inference on an excerpt
+from pyannote.core import Segment
+excerpt = Segment(start=2.0, end=5.0)
+inference.crop("file.wav", excerpt)`,
+];
+
+const pyannote_audio = (model: ModelData) => {
+	if (model.tags?.includes("pyannote-audio-pipeline")) {
+		return pyannote_audio_pipeline(model);
+	}
+	return pyannote_audio_model(model);
+};
+
+const tensorflowttsTextToMel = (model: ModelData) => [
+	`from tensorflow_tts.inference import AutoProcessor, TFAutoModel
+
+processor = AutoProcessor.from_pretrained("${model.id}")
+model = TFAutoModel.from_pretrained("${model.id}")
+`,
+];
+
+const tensorflowttsMelToWav = (model: ModelData) => [
+	`from tensorflow_tts.inference import TFAutoModel
+
+model = TFAutoModel.from_pretrained("${model.id}")
+audios = model.inference(mels)
+`,
+];
+
+const tensorflowttsUnknown = (model: ModelData) => [
+	`from tensorflow_tts.inference import TFAutoModel
+
+model = TFAutoModel.from_pretrained("${model.id}")
+`,
+];
+
+const tensorflowtts = (model: ModelData) => {
+	if (model.tags?.includes("text-to-mel")) {
+		return tensorflowttsTextToMel(model);
+	} else if (model.tags?.includes("mel-to-wav")) {
+		return tensorflowttsMelToWav(model);
+	}
+	return tensorflowttsUnknown(model);
+};
+
+const timm = (model: ModelData) => [
+	`import timm
+
+model = timm.create_model("hf_hub:${model.id}", pretrained=True)`,
+];
+
+const skopsPickle = (model: ModelData, modelFile: string) => {
+	return [
+		`import joblib
+from skops.hub_utils import download
+download("${model.id}", "path_to_folder")
+model = joblib.load(
+	"path_to_folder/${modelFile}"
+)
+# only load pickle files from sources you trust
+# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`,
+	];
+};
+
+const skopsFormat = (model: ModelData, modelFile: string) => {
+	return [
+		`from skops.hub_utils import download
+from skops.io import load
+download("${model.id}", "path_to_folder")
+# make sure model file is in skops format
+# if model is a pickle file, make sure it's from a source you trust
+model = load("path_to_folder/${modelFile}")`,
+	];
+};
+
+const skopsJobLib = (model: ModelData) => {
+	return [
+		`from huggingface_hub import hf_hub_download
+import joblib
+model = joblib.load(
+	hf_hub_download("${model.id}", "sklearn_model.joblib")
+)
+# only load pickle files from sources you trust
+# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`,
+	];
+};
+
+const sklearn = (model: ModelData) => {
+	if (model.tags?.includes("skops")) {
+		const skopsmodelFile = model.config?.sklearn?.filename;
+		const skopssaveFormat = model.config?.sklearn?.model_format;
+		if (!skopsmodelFile) {
+			return [`# ⚠️ Model filename not specified in config.json`];
+		}
+		if (skopssaveFormat === "pickle") {
+			return skopsPickle(model, skopsmodelFile);
+		} else {
+			return skopsFormat(model, skopsmodelFile);
+		}
+	} else {
+		return skopsJobLib(model);
+	}
+};
+
+const fastai = (model: ModelData) => [
+	`from huggingface_hub import from_pretrained_fastai
+
+learn = from_pretrained_fastai("${model.id}")`,
+];
+
+const sampleFactory = (model: ModelData) => [
+	`python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`,
+];
+
+const sentenceTransformers = (model: ModelData) => [
+	`from sentence_transformers import SentenceTransformer
+
+model = SentenceTransformer("${model.id}")`,
+];
+
+const spacy = (model: ModelData) => [
+	`!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl
+
+# Using spacy.load().
+import spacy
+nlp = spacy.load("${nameWithoutNamespace(model.id)}")
+
+# Importing as module.
+import ${nameWithoutNamespace(model.id)}
+nlp = ${nameWithoutNamespace(model.id)}.load()`,
+];
+
+const span_marker = (model: ModelData) => [
+	`from span_marker import SpanMarkerModel
+
+model = SpanMarkerModel.from_pretrained("${model.id}")`,
+];
+
+const stanza = (model: ModelData) => [
+	`import stanza
+
+stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}")
+nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")`,
+];
+
+const speechBrainMethod = (speechbrainInterface: string) => {
+	switch (speechbrainInterface) {
+		case "EncoderClassifier":
+			return "classify_file";
+		case "EncoderDecoderASR":
+		case "EncoderASR":
+			return "transcribe_file";
+		case "SpectralMaskEnhancement":
+			return "enhance_file";
+		case "SepformerSeparation":
+			return "separate_file";
+		default:
+			return undefined;
+	}
+};
+
+const speechbrain = (model: ModelData) => {
+	const speechbrainInterface = model.config?.speechbrain?.interface;
+	if (speechbrainInterface === undefined) {
+		return [`# interface not specified in config.json`];
+	}
+
+	const speechbrainMethod = speechBrainMethod(speechbrainInterface);
+	if (speechbrainMethod === undefined) {
+		return [`# interface in config.json invalid`];
+	}
+
+	return [
+		`from speechbrain.pretrained import ${speechbrainInterface}
+model = ${speechbrainInterface}.from_hparams(
+  "${model.id}"
+)
+model.${speechbrainMethod}("file.wav")`,
+	];
+};
+
+const transformers = (model: ModelData) => {
+	const info = model.transformersInfo;
+	if (!info) {
+		return [`# ⚠️ Type of model unknown`];
+	}
+	const remote_code_snippet = info.custom_class ? ", trust_remote_code=True" : "";
+
+	let autoSnippet: string;
+	if (info.processor) {
+		const varName =
+			info.processor === "AutoTokenizer"
+				? "tokenizer"
+				: info.processor === "AutoFeatureExtractor"
+				? "extractor"
+				: "processor";
+		autoSnippet = [
+			"# Load model directly",
+			`from transformers import ${info.processor}, ${info.auto_model}`,
+			"",
+			`${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
+			`model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
+		].join("\n");
+	} else {
+		autoSnippet = [
+			"# Load model directly",
+			`from transformers import ${info.auto_model}`,
+			`model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
+		].join("\n");
+	}
+
+	if (model.pipeline_tag) {
+		const pipelineSnippet = [
+			"# Use a pipeline as a high-level helper",
+			"from transformers import pipeline",
+			"",
+			`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")",
+		].join("\n");
+		return [pipelineSnippet, autoSnippet];
+	}
+	return [autoSnippet];
+};
+
+const transformersJS = (model: ModelData) => {
+	if (!model.pipeline_tag) {
+		return [`// ⚠️ Unknown pipeline tag`];
+	}
+
+	const libName = "@xenova/transformers";
+
+	return [
+		`// npm i ${libName}
+import { pipeline } from '${libName}';
+
+// Allocate pipeline
+const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`,
+	];
+};
+
+const peftTask = (peftTaskType?: string) => {
+	switch (peftTaskType) {
+		case "CAUSAL_LM":
+			return "CausalLM";
+		case "SEQ_2_SEQ_LM":
+			return "Seq2SeqLM";
+		case "TOKEN_CLS":
+			return "TokenClassification";
+		case "SEQ_CLS":
+			return "SequenceClassification";
+		default:
+			return undefined;
+	}
+};
+
+const peft = (model: ModelData) => {
+	const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
+	const pefttask = peftTask(peftTaskType);
+	if (!pefttask) {
+		return [`# ⚠️ Invalid task_type in config.json`];
+	}
+	if (!peftBaseModel) {
+		return [`# ⚠️ base_model_name not found in config.json`];
+	}
+
+	return [
+		`from peft import PeftModel, PeftConfig
+from transformers import AutoModelFor${pefttask}
+
+config = PeftConfig.from_pretrained("${model.id}")
+model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}")
+model = PeftModel.from_pretrained(model, "${model.id}")`,
+	];
+};
+
+const fasttext = (model: ModelData) => [
+	`from huggingface_hub import hf_hub_download
+import fasttext
+
+model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`,
+];
+
+const stableBaselines3 = (model: ModelData) => [
+	`from huggingface_sb3 import load_from_hub
+checkpoint = load_from_hub(
+	repo_id="${model.id}",
+	filename="{MODEL FILENAME}.zip",
+)`,
+];
+
+const nemoDomainResolver = (domain: string, model: ModelData): string[] | undefined => {
+	switch (domain) {
+		case "ASR":
+			return [
+				`import nemo.collections.asr as nemo_asr
+asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}")
+
+transcriptions = asr_model.transcribe(["file.wav"])`,
+			];
+		default:
+			return undefined;
+	}
+};
+
+const mlAgents = (model: ModelData) => [`mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`];
+
+const nemo = (model: ModelData) => {
+	let command: string[] | undefined = undefined;
+	// Resolve the tag to a nemo domain/sub-domain
+	if (model.tags?.includes("automatic-speech-recognition")) {
+		command = nemoDomainResolver("ASR", model);
+	}
+
+	return command ?? [`# tag did not correspond to a valid NeMo domain.`];
+};
+
+const pythae = (model: ModelData) => [
+	`from pythae.models import AutoModel
+
+model = AutoModel.load_from_hf_hub("${model.id}")`,
+];
+
+//#endregion
+
+export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = {
+	"adapter-transformers": {
+		btnLabel: "Adapter Transformers",
+		repoName: "adapter-transformers",
+		repoUrl: "https://github.com/Adapter-Hub/adapter-transformers",
+		docsUrl: "https://huggingface.co/docs/hub/adapter-transformers",
+		snippets: adapter_transformers,
+	},
+	allennlp: {
+		btnLabel: "AllenNLP",
+		repoName: "AllenNLP",
+		repoUrl: "https://github.com/allenai/allennlp",
+		docsUrl: "https://huggingface.co/docs/hub/allennlp",
+		snippets: allennlp,
+	},
+	asteroid: {
+		btnLabel: "Asteroid",
+		repoName: "Asteroid",
+		repoUrl: "https://github.com/asteroid-team/asteroid",
+		docsUrl: "https://huggingface.co/docs/hub/asteroid",
+		snippets: asteroid,
+	},
+	bertopic: {
+		btnLabel: "BERTopic",
+		repoName: "BERTopic",
+		repoUrl: "https://github.com/MaartenGr/BERTopic",
+		snippets: bertopic,
+	},
+	diffusers: {
+		btnLabel: "Diffusers",
+		repoName: "🤗/diffusers",
+		repoUrl: "https://github.com/huggingface/diffusers",
+		docsUrl: "https://huggingface.co/docs/hub/diffusers",
+		snippets: diffusers,
+	},
+	espnet: {
+		btnLabel: "ESPnet",
+		repoName: "ESPnet",
+		repoUrl: "https://github.com/espnet/espnet",
+		docsUrl: "https://huggingface.co/docs/hub/espnet",
+		snippets: espnet,
+	},
+	fairseq: {
+		btnLabel: "Fairseq",
+		repoName: "fairseq",
+		repoUrl: "https://github.com/pytorch/fairseq",
+		snippets: fairseq,
+	},
+	flair: {
+		btnLabel: "Flair",
+		repoName: "Flair",
+		repoUrl: "https://github.com/flairNLP/flair",
+		docsUrl: "https://huggingface.co/docs/hub/flair",
+		snippets: flair,
+	},
+	keras: {
+		btnLabel: "Keras",
+		repoName: "Keras",
+		repoUrl: "https://github.com/keras-team/keras",
+		docsUrl: "https://huggingface.co/docs/hub/keras",
+		snippets: keras,
+	},
+	nemo: {
+		btnLabel: "NeMo",
+		repoName: "NeMo",
+		repoUrl: "https://github.com/NVIDIA/NeMo",
+		snippets: nemo,
+	},
+	open_clip: {
+		btnLabel: "OpenCLIP",
+		repoName: "OpenCLIP",
+		repoUrl: "https://github.com/mlfoundations/open_clip",
+		snippets: open_clip,
+	},
+	paddlenlp: {
+		btnLabel: "paddlenlp",
+		repoName: "PaddleNLP",
+		repoUrl: "https://github.com/PaddlePaddle/PaddleNLP",
+		docsUrl: "https://huggingface.co/docs/hub/paddlenlp",
+		snippets: paddlenlp,
+	},
+	peft: {
+		btnLabel: "PEFT",
+		repoName: "PEFT",
+		repoUrl: "https://github.com/huggingface/peft",
+		snippets: peft,
+	},
+	"pyannote-audio": {
+		btnLabel: "pyannote.audio",
+		repoName: "pyannote-audio",
+		repoUrl: "https://github.com/pyannote/pyannote-audio",
+		snippets: pyannote_audio,
+	},
+	"sentence-transformers": {
+		btnLabel: "sentence-transformers",
+		repoName: "sentence-transformers",
+		repoUrl: "https://github.com/UKPLab/sentence-transformers",
+		docsUrl: "https://huggingface.co/docs/hub/sentence-transformers",
+		snippets: sentenceTransformers,
+	},
+	sklearn: {
+		btnLabel: "Scikit-learn",
+		repoName: "Scikit-learn",
+		repoUrl: "https://github.com/scikit-learn/scikit-learn",
+		snippets: sklearn,
+	},
+	fastai: {
+		btnLabel: "fastai",
+		repoName: "fastai",
+		repoUrl: "https://github.com/fastai/fastai",
+		docsUrl: "https://huggingface.co/docs/hub/fastai",
+		snippets: fastai,
+	},
+	spacy: {
+		btnLabel: "spaCy",
+		repoName: "spaCy",
+		repoUrl: "https://github.com/explosion/spaCy",
+		docsUrl: "https://huggingface.co/docs/hub/spacy",
+		snippets: spacy,
+	},
+	"span-marker": {
+		btnLabel: "SpanMarker",
+		repoName: "SpanMarkerNER",
+		repoUrl: "https://github.com/tomaarsen/SpanMarkerNER",
+		docsUrl: "https://huggingface.co/docs/hub/span_marker",
+		snippets: span_marker,
+	},
+	speechbrain: {
+		btnLabel: "speechbrain",
+		repoName: "speechbrain",
+		repoUrl: "https://github.com/speechbrain/speechbrain",
+		docsUrl: "https://huggingface.co/docs/hub/speechbrain",
+		snippets: speechbrain,
+	},
+	stanza: {
+		btnLabel: "Stanza",
+		repoName: "stanza",
+		repoUrl: "https://github.com/stanfordnlp/stanza",
+		docsUrl: "https://huggingface.co/docs/hub/stanza",
+		snippets: stanza,
+	},
+	tensorflowtts: {
+		btnLabel: "TensorFlowTTS",
+		repoName: "TensorFlowTTS",
+		repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
+		snippets: tensorflowtts,
+	},
+	timm: {
+		btnLabel: "timm",
+		repoName: "pytorch-image-models",
+		repoUrl: "https://github.com/rwightman/pytorch-image-models",
+		docsUrl: "https://huggingface.co/docs/hub/timm",
+		snippets: timm,
+	},
+	transformers: {
+		btnLabel: "Transformers",
+		repoName: "🤗/transformers",
+		repoUrl: "https://github.com/huggingface/transformers",
+		docsUrl: "https://huggingface.co/docs/hub/transformers",
+		snippets: transformers,
+	},
+	"transformers.js": {
+		btnLabel: "Transformers.js",
+		repoName: "transformers.js",
+		repoUrl: "https://github.com/xenova/transformers.js",
+		docsUrl: "https://huggingface.co/docs/hub/transformers-js",
+		snippets: transformersJS,
+	},
+	fasttext: {
+		btnLabel: "fastText",
+		repoName: "fastText",
+		repoUrl: "https://fasttext.cc/",
+		snippets: fasttext,
+	},
+	"sample-factory": {
+		btnLabel: "sample-factory",
+		repoName: "sample-factory",
+		repoUrl: "https://github.com/alex-petrenko/sample-factory",
+		docsUrl: "https://huggingface.co/docs/hub/sample-factory",
+		snippets: sampleFactory,
+	},
+	"stable-baselines3": {
+		btnLabel: "stable-baselines3",
+		repoName: "stable-baselines3",
+		repoUrl: "https://github.com/huggingface/huggingface_sb3",
+		docsUrl: "https://huggingface.co/docs/hub/stable-baselines3",
+		snippets: stableBaselines3,
+	},
+	"ml-agents": {
+		btnLabel: "ml-agents",
+		repoName: "ml-agents",
+		repoUrl: "https://github.com/huggingface/ml-agents",
+		docsUrl: "https://huggingface.co/docs/hub/ml-agents",
+		snippets: mlAgents,
+	},
+	pythae: {
+		btnLabel: "pythae",
+		repoName: "pythae",
+		repoUrl: "https://github.com/clementchadebec/benchmark_VAE",
+		snippets: pythae,
+	},
+} as const;
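+
+// Usage sketch (hypothetical model object; most snippet builders only read `id` and `tags`):
+// const snippets = MODEL_LIBRARIES_UI_ELEMENTS["timm"]?.snippets({ id: "user/repo" } as ModelData);
+// // → ['import timm\n\nmodel = timm.create_model("hf_hub:user/repo", pretrained=True)']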
diff --git a/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts b/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts
new file mode 100644
index 0000000000000000000000000000000000000000..780323f9f5514a3eb2d5dc3f6f4afb535c59352e
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts
@@ -0,0 +1,46 @@
+import type { ModelLibraryKey, PipelineType } from "@huggingface/tasks";
+
+/**
+ * Mapping from library name (excluding Transformers) to its supported tasks.
+ * Inference API should be disabled for all (library, task) pairs not in this mapping.
+ * As an exception, we assume Transformers supports all inference tasks.
+ * This mapping is generated automatically by the "python-api-export-tasks" action in the huggingface/api-inference-community repo upon merge.
+ * Ref: https://github.com/huggingface/api-inference-community/pull/158
+ */
+export const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>> = {
+	"adapter-transformers": ["question-answering", "text-classification", "token-classification"],
+	allennlp: ["question-answering"],
+	asteroid: [
+		// "audio-source-separation",
+		"audio-to-audio",
+	],
+	bertopic: ["text-classification"],
+	diffusers: ["image-to-image", "text-to-image"],
+	doctr: ["object-detection"],
+	espnet: ["text-to-speech", "automatic-speech-recognition"],
+	fairseq: ["text-to-speech", "audio-to-audio"],
+	fastai: ["image-classification"],
+	fasttext: ["feature-extraction", "text-classification"],
+	flair: ["token-classification"],
+	k2: ["automatic-speech-recognition"],
+	keras: ["image-classification"],
+	nemo: ["automatic-speech-recognition"],
+	open_clip: ["zero-shot-classification", "zero-shot-image-classification"],
+	paddlenlp: ["conversational", "fill-mask", "summarization", "zero-shot-classification"],
+	peft: ["text-generation"],
+	"pyannote-audio": ["automatic-speech-recognition"],
+	"sentence-transformers": ["feature-extraction", "sentence-similarity"],
+	sklearn: ["tabular-classification", "tabular-regression", "text-classification"],
+	spacy: ["token-classification", "text-classification", "sentence-similarity"],
+	"span-marker": ["token-classification"],
+	speechbrain: [
+		"audio-classification",
+		"audio-to-audio",
+		"automatic-speech-recognition",
+		"text-to-speech",
+		"text2text-generation",
+	],
+	stanza: ["token-classification"],
+	timm: ["image-classification"],
+	mindspore: ["image-classification"],
+};
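+
+// Sketch of the intended check (Transformers is special-cased, per the note above):
+// const isSupported = (library: ModelLibraryKey, task: PipelineType): boolean =>
+// 	library === "transformers" || (LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS[library] ?? []).includes(task);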
diff --git a/packages/widgets/src/lib/interfaces/README.md b/packages/widgets/src/lib/interfaces/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2de7746091e0410a6c941421dc3f1a484c14baf
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/README.md
@@ -0,0 +1,13 @@
+# Interfaces
+
+This folder contains the definition files (written in TypeScript) for the huggingface.co hub's:
+
+- **pipeline types** a.k.a. **task types** (used to determine which widget to display on the model page, and which inference API to run)
+- **default widget inputs** (when they aren't provided in the model card)
+- definitions and UI elements for **third-party libraries**.
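+
+For instance, a minimal sketch (assuming the import is resolved relative to this folder) of looking up a default widget input:
+
+```ts
+import { MAPPING_DEFAULT_WIDGET } from "./DefaultWidget.js";
+
+const inputs = MAPPING_DEFAULT_WIDGET.get("en")?.get("fill-mask");
+```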
+
+Please add to any of those definitions by opening a PR. Thanks 🔥
+
+⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub.
+
+## Feedback (feature requests, bugs, etc.) is super welcome 💙💚💛💜♥️🧡
diff --git a/packages/widgets/src/lib/interfaces/Types.ts b/packages/widgets/src/lib/interfaces/Types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7c9ef752fbf0e268aa7474c7fa96c4100df96a3f
--- /dev/null
+++ b/packages/widgets/src/lib/interfaces/Types.ts
@@ -0,0 +1,111 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { WidgetExample } from "../components/InferenceWidget/shared/WidgetExample.js";
+import type { InferenceDisplayability } from "./InferenceDisplayability.js";
+
+/**
+ * Public interface for model metadata
+ */
+export interface ModelData {
+	/**
+	 * id of model (e.g. 'user/repo_name')
+	 */
+	id: string;
+	/**
+	 * Kept for backward compatibility
+	 */
+	modelId?: string;
+	/**
+	 * Whether or not to enable the inference widget for this model
+	 */
+	inference: InferenceDisplayability;
+	/**
+	 * is this model private?
+	 */
+	private?: boolean;
+	/**
+	 * this dictionary has useful information about the model configuration
+	 */
+	config?: Record<string, unknown> & {
+		adapter_transformers?: { model_class?: string; model_name?: string };
+		architectures?: string[];
+		sklearn?: {
+			filename?: string;
+			model_format?: string;
+		};
+		speechbrain?: {
+			interface?: string;
+		};
+		peft?: {
+			base_model_name?: string;
+			task_type?: string;
+		};
+	};
+	/**
+	 * all the model tags
+	 */
+	tags?: string[];
+	/**
+	 * transformers-specific info to display in the code sample.
+	 */
+	transformersInfo?: TransformersInfo;
+	/**
+	 * Pipeline type
+	 */
+	pipeline_tag?: PipelineType | undefined;
+	/**
+	 * for relevant models, get mask token
+	 */
+	mask_token?: string | undefined;
+	/**
+	 * Example data that will be fed into the widget.
+	 *
+	 * can be set in the model card metadata (under `widget`),
+	 * or by default in `DefaultWidget.ts`
+	 */
+	widgetData?: WidgetExample[] | undefined;
+	/**
+	 * Parameters that will be used by the widget when calling Inference API
+	 * https://huggingface.co/docs/api-inference/detailed_parameters
+	 *
+	 * can be set in the model card metadata (under `inference/parameters`)
+	 * Example:
+	 * inference:
+	 *     parameters:
+	 *         key: val
+	 */
+	cardData?: {
+		inference?:
+			| boolean
+			| {
+					parameters?: Record<string, unknown>;
+			  };
+		base_model?: string;
+	};
+	/**
+	 * Library name
+	 * Example: transformers, SpeechBrain, Stanza, etc.
+	 */
+	library_name?: string;
+}
+
+/**
+ * transformers-specific info to display in the code sample.
+ */
+export interface TransformersInfo {
+	/**
+	 * e.g. AutoModelForSequenceClassification
+	 */
+	auto_model: string;
+	/**
+	 * if set in config.json's auto_map
+	 */
+	custom_class?: string;
+	/**
+	 * e.g. text-classification
+	 */
+	pipeline_tag?: PipelineType;
+	/**
+	 * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor"
+	 */
+	processor?: string;
+}
diff --git a/packages/widgets/src/lib/utils/ViewUtils.ts b/packages/widgets/src/lib/utils/ViewUtils.ts
new file mode 100644
index 0000000000000000000000000000000000000000..86cdd0df1401714eb33a5034689f57a63016445c
--- /dev/null
+++ b/packages/widgets/src/lib/utils/ViewUtils.ts
@@ -0,0 +1,138 @@
+import type { PipelineType } from "@huggingface/tasks";
+
+const ESCAPED = {
+	'"': "&quot;",
+	"'": "&#39;",
+	"&": "&amp;",
+	"<": "&lt;",
+	">": "&gt;",
+};
+
+/**
+ * Clamps the input value to the range [min <= x <= max].
+ */
+export function clamp(x: number, min: number, max: number): number {
+	return Math.max(min, Math.min(x, max));
+}
+
+/**
+ * Similar to lodash's uniqBy. In case of multiple items with the same key,
+ * only the first one is kept.
+ */
+export function uniqBy<T, K>(items: T[], itemToKey: (item: T) => K): T[] {
+	const keys = new Set(items.map((item) => itemToKey(item)));
+
+	return items.filter((item) => {
+		// Returns true if the key was still in the set, i.e. this is the first item with its key.
+		return keys.delete(itemToKey(item));
+	});
+}
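+
+// e.g. uniqBy([{ k: 1 }, { k: 1 }, { k: 2 }], (x) => x.k) → [{ k: 1 }, { k: 2 }]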
+
+export function typedKeys<K extends string, V>(obj: { [k in K]: V }): K[] {
+	return Object.keys(obj) as K[];
+}
+
+/**
+ * HTML escapes the passed string
+ */
+export function escape(html: unknown): string {
+	return String(html).replace(/["'&<>]/g, (match) => ESCAPED[match as keyof typeof ESCAPED]);
+}
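+
+// e.g. escape('<a href="#">') === "&lt;a href=&quot;#&quot;&gt;"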
+
+/**
+ * Returns a promise that will resolve after the specified time
+ * @param ms Number of ms to wait
+ */
+export function delay(ms: number): Promise<void> {
+	return new Promise((resolve) => {
+		setTimeout(() => resolve(), ms);
+	});
+}
+
+/**
+ * "Real" modulo (always >= 0), not remainder.
+ */
+export function mod(a: number, n: number): number {
+	return ((a % n) + n) % n;
+}
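+
+// e.g. mod(-1, 3) === 2, whereas -1 % 3 === -1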
+
+/**
+ * Sum of elements in array
+ */
+export function sum(arr: number[]): number {
+	return arr.reduce((a, b) => a + b, 0);
+}
+
+/**
+ * Return a random item from an array
+ */
+export function randomItem<T>(arr: T[]): T {
+	return arr[Math.floor(Math.random() * arr.length)];
+}
+
+/**
+ * Safely parse JSON
+ */
+export function parseJSON<T>(x: unknown): T | undefined {
+	if (!x || typeof x !== "string") {
+		return undefined;
+	}
+	try {
+		return JSON.parse(x);
+	} catch (e) {
+		if (e instanceof SyntaxError) {
+			console.error(e.name);
+		} else if (e instanceof Error) {
+			console.error(e.message);
+		} else {
+			console.error(e);
+		}
+		return undefined;
+	}
+}
+
+/**
+ * Return true if an HTML element is scrolled all the way
+ */
+export function isFullyScrolled(elt: HTMLElement): boolean {
+	return elt.scrollHeight - Math.abs(elt.scrollTop) === elt.clientHeight;
+}
+
+/**
+ * Smoothly scroll an element all the way
+ */
+export function scrollToMax(elt: HTMLElement, axis: "x" | "y" = "y"): void {
+	elt.scroll({
+		behavior: "smooth",
+		left: axis === "x" ? elt.scrollWidth : undefined,
+		top: axis === "y" ? elt.scrollHeight : undefined,
+	});
+}
+
+/**
+ * Converts hex string to rgb array (i.e. [r,g,b])
+ * original from https://stackoverflow.com/a/39077686/6558628
+ */
+export function hexToRgb(hex: string): number[] {
+	return (
+		hex
+			.replace(/^#?([a-f\d])([a-f\d])([a-f\d])$/i, (_, r, g, b) => "#" + r + r + g + g + b + b)
+			.replace(/^#/, "") // strip the optional leading "#"; substring(1) would drop a digit when it's absent
+			.match(/.{2}/g)
+			?.map((x) => parseInt(x, 16)) || [0, 0, 0]
+	);
+}
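+
+// e.g. hexToRgb("#ff8000") → [255, 128, 0]; hexToRgb("#f80") → [255, 136, 0]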
+
+// Get the Task id corresponding to the modelPipeline (should be === in 99% of cases)
+export function getPipelineTask(modelPipeline: PipelineType): PipelineType {
+	return modelPipeline === "text2text-generation" ? "text-generation" : modelPipeline;
+}
+
+/**
+* For Tailwind:
+bg-blue-100 border-blue-100 dark:bg-blue-800 dark:border-blue-800
+bg-green-100 border-green-100 dark:bg-green-800 dark:border-green-800
+bg-yellow-100 border-yellow-100 dark:bg-yellow-800 dark:border-yellow-800
+bg-purple-100 border-purple-100 dark:bg-purple-800 dark:border-purple-800
+bg-red-100 border-red-100 dark:bg-red-800 dark:border-red-800
+*/
diff --git a/packages/widgets/src/routes/+layout.svelte b/packages/widgets/src/routes/+layout.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..fd328954f1a4f36c0cc264102f6d6c709ef9001f
--- /dev/null
+++ b/packages/widgets/src/routes/+layout.svelte
@@ -0,0 +1,7 @@
+<script lang="ts">
+	import "../tailwind.css";
+</script>
+
+<div class="min-h-screen bg-gray-50 text-black dark:bg-gray-950">
+	<slot />
+</div>
diff --git a/packages/widgets/src/routes/+page.svelte b/packages/widgets/src/routes/+page.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..ad261beddb855a1567fc8af8c6d5a88e1837cbd7
--- /dev/null
+++ b/packages/widgets/src/routes/+page.svelte
@@ -0,0 +1,581 @@
+<script lang="ts">
+	import type { ModelData } from "$lib/interfaces/Types.js";
+
+	import InferenceWidget from "$lib/components/InferenceWidget/InferenceWidget.svelte";
+	import ModeSwitcher from "$lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte";
+	import { InferenceDisplayability } from "$lib/interfaces/InferenceDisplayability.js";
+	import { onMount } from "svelte";
+
+	let apiToken = "";
+
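+	// Persist the user-provided API token across page reloads (restored in onMount below).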
+	function storeHFToken() {
+		window.localStorage.setItem("hf_token", apiToken);
+	}
+
+	onMount(() => {
+		const token = window.localStorage.getItem("hf_token");
+		if (token) {
+			apiToken = token;
+		}
+	});
+
+	const models: ModelData[] = [
+		{
+			id: "WizardLM/WizardLM-70B-V1.0",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "openai/clip-vit-base-patch16",
+			pipeline_tag: "zero-shot-image-classification",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "lllyasviel/sd-controlnet-canny",
+			pipeline_tag: "image-to-image",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "ydshieh/vit-gpt2-coco-en",
+			pipeline_tag: "image-to-text",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "impira/layoutlm-document-qa",
+			pipeline_tag: "document-question-answering",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "What is the invoice number?",
+					src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
+				},
+				{
+					text: "What is the purchase amount?",
+					src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg",
+				},
+			],
+		},
+		{
+			id: "skops/hf_hub_example-bdc26c1f-7e82-42eb-9657-0318315f2df0",
+			pipeline_tag: "tabular-classification",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "dandelin/vilt-b32-finetuned-vqa",
+			pipeline_tag: "visual-question-answering",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "What animal is it?",
+					src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
+				},
+				{
+					text: "Where is it?",
+					src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg",
+				},
+			],
+		},
+		{
+			id: "roberta-large-mnli",
+			pipeline_tag: "text-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "I like you. I love you.",
+					group: "Contradiction",
+					example_title: "Foobar",
+					output: [
+						{ label: "Hello", score: 0.8 },
+						{ label: "Bye", score: 0.2 },
+					],
+				},
+				{ text: "This is good. This is bad.", group: "Contradiction" },
+				{ text: "He runs fast. He runs slow", group: "Contradiction" },
+				{ text: "I like you", group: "Neutral" },
+				{ text: "This is good", group: "Neutral" },
+				{ text: "He runs fast", group: "Neutral" },
+			],
+		},
+		{
+			id: "edbeeching/decision-transformer-gym-hopper-medium-replay",
+			pipeline_tag: "reinforcement-learning",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "sgugger/resnet50d",
+			pipeline_tag: "image-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
+					example_title: "Tiger",
+				},
+				{
+					src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg",
+					example_title: "Teapot",
+					output: [
+						{
+							label: "teapot: pot for brewing tea; usually has a spout and handle",
+							score: 0.8853782415390015,
+						},
+						{
+							label: "coffeepot: tall pot in which coffee is brewed",
+							score: 0.016733085736632347,
+						},
+						{
+							label: "water jug: a jug that holds water",
+							score: 0.0019129429711028934,
+						},
+						{
+							label: "cup: a punch served in a pitcher instead of a punch bowl",
+							score: 0.0009115593857131898,
+						},
+						{
+							label: "strainer: a filter to retain larger pieces while smaller pieces and liquids pass through",
+							score: 0.0007022042409516871,
+						},
+					],
+				},
+				{
+					src: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg",
+					example_title: "Palace",
+				},
+			],
+		},
+		{
+			id: "facebook/detr-resnet-50",
+			pipeline_tag: "object-detection",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "facebook/detr-resnet-50-panoptic",
+			pipeline_tag: "image-segmentation",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "julien-c/distilbert-feature-extraction",
+			pipeline_tag: "feature-extraction",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "Hello world" }],
+		},
+		{
+			id: "sentence-transformers/distilbert-base-nli-stsb-mean-tokens",
+			pipeline_tag: "feature-extraction",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "Hello, world" }],
+		},
+		{
+			id: "dbmdz/bert-large-cased-finetuned-conll03-english",
+			pipeline_tag: "token-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{ text: "My name is Wolfgang and I live in Berlin" },
+				{ text: "My name is Sarah and I live in London" },
+				{ text: "My name is Clara and I live in Berkeley, California." },
+			],
+		},
+		{
+			id: "distilbert-base-uncased-distilled-squad",
+			pipeline_tag: "question-answering",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "Which name is also used to describe the Amazon rainforest in English?",
+					context: `The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.`,
+				},
+			],
+		},
+		{
+			id: "t5-base",
+			pipeline_tag: "translation",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "My name is Wolfgang and I live in Berlin" }],
+		},
+		{
+			id: "facebook/bart-large-cnn",
+			pipeline_tag: "summarization",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.",
+				},
+			],
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{ text: "My name is Julien and I like to", output: { text: " code cool products with my friends." } },
+				{ text: "My name is Thomas and my main" },
+				{ text: "My name is Mariama, my favorite" },
+				{ text: "My name is Clara and I am" },
+				{ text: "Once upon a time," },
+			],
+		},
+		{
+			id: "bigscience/bloom",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{ text: "My name is Julien and I like to" },
+				{ text: "My name is Thomas and my main" },
+				{ text: "My name is Mariama, my favorite" },
+				{ text: "My name is Clara and I am" },
+				{ text: "Once upon a time," },
+			],
+		},
+		{
+			id: "distilroberta-base",
+			pipeline_tag: "fill-mask",
+			mask_token: "<mask>",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "Paris is the <mask> of France." }, { text: "The goal of life is <mask>." }],
+		},
+		{
+			id: "facebook/bart-large-mnli",
+			pipeline_tag: "zero-shot-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "I have a problem with my iphone that needs to be resolved asap!!",
+					candidate_labels: "urgent, not urgent, phone, tablet, computer",
+					multi_class: true,
+				},
+			],
+		},
+		{
+			id: "google/tapas-base-finetuned-wtq",
+			pipeline_tag: "table-question-answering",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "How many stars does the transformers repository have?",
+					table: {
+						Repository: ["Transformers", "Datasets", "Tokenizers"],
+						Stars: [36542, 4512, 3934],
+						Contributors: [651, 77, 34],
+						"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
+					},
+				},
+			],
+		},
+		{
+			id: "microsoft/tapex-base-finetuned-wtq",
+			pipeline_tag: "table-question-answering",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "How many stars does the transformers repository have?",
+					table: {
+						Repository: ["Transformers", "Datasets", "Tokenizers"],
+						Stars: [36542, 4512, 3934],
+						Contributors: [651, 77, 34],
+						"Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
+					},
+				},
+			],
+		},
+		{
+			id: "julien-c/wine-quality",
+			pipeline_tag: "tabular-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					structured_data: {
+						fixed_acidity: [7.4, 7.8, 10.3],
+						volatile_acidity: [0.7, 0.88, 0.32],
+						citric_acid: [0.0, 0.0, 0.45],
+						residual_sugar: [1.9, 2.6, 6.4],
+						chlorides: [0.076, 0.098, 0.073],
+						free_sulfur_dioxide: [11.0, 25.0, 5.0],
+						total_sulfur_dioxide: [34.0, 67.0, 13.0],
+						density: [0.9978, 0.9968, 0.9976],
+						pH: [3.51, 3.2, 3.23],
+						sulphates: [0.56, 0.68, 0.82],
+						alcohol: [9.4, 9.8, 12.6],
+					},
+				},
+			],
+		},
+		{
+			id: "bigscience/T0pp",
+			pipeline_tag: "text2text-generation",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "facebook/blenderbot-400M-distill",
+			pipeline_tag: "conversational",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "Hey my name is Julien! How are you?" }],
+		},
+		{
+			id: "osanseviero/BigGAN-deep-128",
+			pipeline_tag: "text-to-image",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					text: "a tiger",
+					output: {
+						url: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
+					},
+				},
+			],
+		},
+		{
+			id: "julien-c/kan-bayashi_csmsc_tacotron2",
+			pipeline_tag: "text-to-speech",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [{ text: "请您说得慢些好吗" }],
+		},
+		{
+			id: "superb/wav2vec2-base-superb-sid",
+			pipeline_tag: "audio-classification",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					example_title: "Librispeech sample 1",
+					src: "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
+					output: [
+						{
+							score: 1,
+							label: "id10003",
+						},
+						{
+							score: 3.958137817505758e-9,
+							label: "id10912",
+						},
+						{
+							score: 2.8285052078302897e-9,
+							label: "id11089",
+						},
+						{
+							score: 2.4077480009765395e-9,
+							label: "id10017",
+						},
+						{
+							score: 1.3356071804082603e-9,
+							label: "id10045",
+						},
+					],
+				},
+			],
+		},
+		{
+			id: "julien-c/mini_an4_asr_train_raw_bpe_valid",
+			pipeline_tag: "automatic-speech-recognition",
+			inference: InferenceDisplayability.Yes,
+		},
+		{
+			id: "facebook/wav2vec2-base-960h",
+			pipeline_tag: "automatic-speech-recognition",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					example_title: "Librispeech sample 1",
+					src: "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
+					output: {
+						text: "GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS",
+					},
+				},
+			],
+		},
+		{
+			id: "facebook/wav2vec2-large-xlsr-53-french",
+			pipeline_tag: "automatic-speech-recognition",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					example_title: "Librispeech sample 1",
+					src: "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
+				},
+			],
+		},
+		{
+			id: "manandey/wav2vec2-large-xlsr-mongolian",
+			pipeline_tag: "automatic-speech-recognition",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					example_title: "Librispeech sample 1",
+					src: "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
+				},
+			],
+		},
+		{
+			id: "osanseviero/full-sentence-distillroberta2",
+			pipeline_tag: "sentence-similarity",
+			inference: InferenceDisplayability.Yes,
+			widgetData: [
+				{
+					source_sentence: "That is a happy person",
+					sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
+				},
+			],
+		},
+		{
+			id: "speechbrain/mtl-mimic-voicebank",
+			private: false,
+			pipeline_tag: "audio-to-audio",
+			inference: InferenceDisplayability.Yes,
+			tags: ["speech-enhancement"],
+			widgetData: [],
+		},
+		{
+			id: "speechbrain/sepformer-wham",
+			private: false,
+			pipeline_tag: "audio-to-audio",
+			inference: InferenceDisplayability.Yes,
+			tags: ["audio-source-separation"],
+			widgetData: [],
+		},
+		{
+			id: "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
+			private: false,
+			pipeline_tag: "audio-to-audio",
+			inference: InferenceDisplayability.Yes,
+			tags: ["audio-source-separation"],
+			widgetData: [],
+		},
+	];
+
+	const modelsDisabled: ModelData[] = [
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.CustomCode,
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.ExplicitOptOut,
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.LibraryNotDetected,
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.PipelineLibraryPairNotSupported,
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.PipelineNotDetected,
+		},
+		{
+			id: "Phind/Phind-CodeLlama-34B-v1",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.Yes,
+		},
+	];
+
+	const modelsDisabledWithExamples: ModelData[] = [
+		{
+			id: "superb/wav2vec2-base-superb-sid",
+			pipeline_tag: "audio-classification",
+			inference: InferenceDisplayability.CustomCode,
+			widgetData: [
+				{
+					example_title: "Librispeech sample 1",
+					src: "https://cdn-media.huggingface.co/speech_samples/sample1.flac",
+					output: [
+						{
+							score: 1,
+							label: "id10003",
+						},
+						{
+							score: 3.958137817505758e-9,
+							label: "id10912",
+						},
+					],
+				},
+			],
+		},
+		{
+			id: "osanseviero/BigGAN-deep-128",
+			pipeline_tag: "text-to-image",
+			inference: InferenceDisplayability.LibraryNotDetected,
+			widgetData: [
+				{
+					text: "a tiger",
+					output: {
+						url: "https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
+					},
+				},
+			],
+		},
+		{
+			id: "gpt2",
+			pipeline_tag: "text-generation",
+			inference: InferenceDisplayability.PipelineNotDetected,
+			widgetData: [
+				// the widget should only show the sample that has an output here
+				{ text: "My name is Julien and I like to", output: { text: " code cool products with my friends." } },
+				{ text: "My name is Thomas and my main" },
+				{ text: "My name is Mariama, my favorite" },
+				{ text: "My name is Clara and I am" },
+				{ text: "Once upon a time," },
+			],
+		},
+	];
+</script>
+
+<div class="flex flex-col gap-6 py-12 px-4">
+	<ModeSwitcher />
+
+	<label>
+		First, enter your HF token
+		<input class="form-input" type="text" bind:value={apiToken} placeholder="hf_..." on:change={storeHFToken} />
+	</label>
+
+	<div>
+		<h1 class="mb-8 text-4xl font-semibold">Showcase of all types of inference widgets running</h1>
+		<div class="grid gap-4 w-full" style="grid-template-columns: repeat(auto-fill, minmax(400px, 1fr));">
+			{#each models as model}
+				<div>
+					<a class="mb-3 block text-xs text-gray-300" href="/{model.id}">
+						<code>{model.id}</code>
+					</a>
+					<div class="rounded-xl bg-white p-5 shadow-sm">
+						<InferenceWidget {apiToken} {model} />
+					</div>
+				</div>
+			{/each}
+		</div>
+	</div>
+
+	<div>
+		<h1 class="mb-8 text-4xl font-semibold">Showcase of all types of disabled inference</h1>
+		<div class="grid gap-4 w-full" style="grid-template-columns: repeat(auto-fill, minmax(400px, 1fr));">
+			{#each modelsDisabled as model}
+				<div>
+					<a class="mb-3 block text-xs text-gray-300" href="/{model.id}">
+						<code>{model.id}</code>
+					</a>
+					<div class="max-w-md rounded-xl bg-white p-5 shadow-sm">
+						<InferenceWidget {apiToken} {model} />
+					</div>
+				</div>
+			{/each}
+		</div>
+	</div>
+
+	<div>
+		<h1 class="mb-8 text-4xl font-semibold">Showcase of all types of disabled inference with example outputs</h1>
+		<div class="grid gap-4 w-full" style="grid-template-columns: repeat(auto-fill, minmax(400px, 1fr));">
+			{#each modelsDisabledWithExamples as model}
+				<div>
+					<a class="mb-3 block text-xs text-gray-300" href="/{model.id}">
+						<code>{model.id}</code>
+					</a>
+					<div class="max-w-md rounded-xl bg-white p-5 shadow-sm">
+						<InferenceWidget {apiToken} {model} />
+					</div>
+				</div>
+			{/each}
+		</div>
+	</div>
+</div>
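
Each showcase entry above is a plain `ModelData` literal: an `id`, a `pipeline_tag`, an `inference` displayability flag, and optional `widgetData` samples (with canned `output`s for the disabled showcases). As a rough sketch, adding one more model to the demo just means appending another literal; the model id below is chosen for illustration only:

```ts
// Hypothetical extra entry for the `models` array above (illustration only).
const extraModel: ModelData = {
	id: "distilbert-base-uncased-finetuned-sst-2-english",
	pipeline_tag: "text-classification",
	inference: InferenceDisplayability.Yes,
	widgetData: [{ text: "I love this movie!" }, { text: "This was a waste of time." }],
};
```
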
diff --git a/packages/widgets/src/routes/[...model]/+page.svelte b/packages/widgets/src/routes/[...model]/+page.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..c63519c95561bc38307b9072bca688451d37d8b3
--- /dev/null
+++ b/packages/widgets/src/routes/[...model]/+page.svelte
@@ -0,0 +1,28 @@
+<script lang="ts">
+	import ModeSwitcher from "$lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte";
+	import InferenceWidget from "$lib/components/InferenceWidget/InferenceWidget.svelte";
+
+	export let data;
+</script>
+
+<a href="/" class="ml-3 block pt-3 underline">← Back to index</a>
+<ModeSwitcher />
+
+<div class="container py-24">
+	{#if data.model}
+		<div>
+			<a class="mb-3 block text-xs text-gray-300" href="/{data.model.id}">
+				<code>{data.model.id}</code>
+			</a>
+			<div class="max-w-3xl rounded-xl bg-white p-5 shadow-sm">
+				<InferenceWidget model={data.model} />
+			</div>
+		</div>
+
+		<pre class="mt-16 px-3 py-4 text-xs text-gray-900">
+			{JSON.stringify(data.model, null, 2)}
+		</pre>
+	{:else}
+		<div>Error. Probably a non-existent model. {data.message}</div>
+	{/if}
+</div>
diff --git a/packages/widgets/src/routes/[...model]/+page.ts b/packages/widgets/src/routes/[...model]/+page.ts
new file mode 100644
index 0000000000000000000000000000000000000000..ac2edfd7d99f2f072f6277ed6c759a4e4d074f7a
--- /dev/null
+++ b/packages/widgets/src/routes/[...model]/+page.ts
@@ -0,0 +1,17 @@
+import type { WidgetProps } from "$lib/components/InferenceWidget/shared/types.js";
+import type { Load } from "@sveltejs/kit";
+
+export const load: Load = async ({ params, fetch }): Promise<{ model?: WidgetProps["model"]; message?: string }> => {
+	const url = `https://huggingface.co/api/models/${params.model}`;
+	try {
+		const res = await fetch(url);
+		if (!res.ok) throw new Error(`HTTP ${res.status}`);
+		const model = await res.json();
+		return { model };
+	} catch {
+		// todo: throw error() instead
+		return {
+			message: `Model ${params.model} not found (probably)`,
+		};
+	}
+};
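
For reference, the `load` above is a thin wrapper around the public Hub endpoint `https://huggingface.co/api/models/<id>`. A standalone sketch of the same lookup (with `ModelInfo` as a hypothetical narrowed type; the real response carries many more fields) might look like:

```ts
interface ModelInfo {
	id: string;
	pipeline_tag?: string;
	tags?: string[];
}

// Hypothetical helper mirroring the `load` function above.
async function fetchModelInfo(modelId: string): Promise<ModelInfo> {
	const res = await fetch(`https://huggingface.co/api/models/${modelId}`);
	if (!res.ok) {
		throw new Error(`Model ${modelId} not found (HTTP ${res.status})`);
	}
	return (await res.json()) as ModelInfo;
}

// e.g. (await fetchModelInfo("gpt2")).pipeline_tag === "text-generation"
```
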
diff --git a/packages/widgets/src/tailwind.css b/packages/widgets/src/tailwind.css
new file mode 100644
index 0000000000000000000000000000000000000000..8f335baed31d5702c693a95e496c60549c208f8b
--- /dev/null
+++ b/packages/widgets/src/tailwind.css
@@ -0,0 +1,226 @@
+@tailwind base;
+
+@layer base {
+	html.dark {
+		--scrollbarBG: #020011;
+		--thumbBG: #374151;
+	}
+	.dark *::-webkit-scrollbar {
+		width: 11px;
+		height: 11px;
+	}
+	.dark * {
+		scrollbar-width: thin;
+		scrollbar-color: var(--thumbBG) var(--scrollbarBG);
+	}
+
+	.dark input,
+	.dark textarea,
+	.dark [contenteditable] {
+		caret-color: white !important;
+	}
+
+	.dark *::-webkit-scrollbar-track {
+		background: var(--scrollbarBG);
+	}
+	.dark *::-webkit-scrollbar-thumb {
+		background-color: var(--thumbBG);
+		border-radius: 6px;
+		border: 3px solid var(--scrollbarBG);
+	}
+	/* .dark input:-internal-autofill-selected {
+		@apply bg-gray-925;
+	} */
+	.dark .bg-white {
+		@apply bg-gray-950;
+	}
+	.dark .text-black {
+		@apply text-gray-200;
+	}
+	.dark .text-gray-900 {
+		@apply text-gray-200;
+	}
+	.dark .text-gray-800 {
+		@apply text-gray-300;
+	}
+	.dark .text-gray-700 {
+		@apply text-gray-300;
+	}
+	.dark .text-gray-600 {
+		@apply text-gray-350;
+	}
+	.dark .text-gray-500 {
+		@apply text-gray-400;
+	}
+	.dark .border-gray-200,
+	.dark .border-gray-100,
+	.dark .border,
+	.dark .border-b {
+		@apply border-gray-850;
+	}
+}
+
+@tailwind components;
+
+@layer components {
+	.btn,
+	.btn-widget {
+		@apply inline-flex cursor-pointer select-none items-center justify-center whitespace-nowrap rounded-lg border bg-gradient-to-b px-3 py-1 focus:outline-none focus:ring;
+	}
+	.btn {
+		@apply border-gray-200 from-white to-gray-100 text-gray-800 hover:shadow-inner dark:border-gray-900 dark:from-gray-800 dark:to-gray-950 dark:text-gray-200 dark:hover:from-gray-700;
+	}
+	.btn-widget {
+		@apply h-8 from-gray-50 to-gray-200 hover:from-gray-100 dark:border-gray-900 dark:from-gray-800 dark:to-gray-950 dark:hover:from-gray-700;
+	}
+	.btn:disabled,
+	.btn-widget:disabled {
+		@apply cursor-not-allowed opacity-50;
+	}
+	.btn.btn-lg {
+		@apply px-4 py-1.5 font-semibold;
+	}
+	.overview-card-wrapper {
+		@apply from-gray-50-to-white rounded-lg border border-gray-100 bg-gradient-to-r via-white text-base shadow-sm hover:via-gray-50 hover:to-gray-100 dark:border-gray-900 dark:via-gray-950 dark:hover:from-gray-950 dark:hover:via-gray-925 dark:hover:to-gray-925;
+	}
+	.overview-card-wrapper.white {
+		@apply from-white to-white dark:from-gray-925 dark:to-gray-950;
+	}
+	.tab {
+		@apply -mb-px flex cursor-pointer select-none items-center border-r border-gray-200 px-4 text-center;
+	}
+	.tab:not(.active) {
+		@apply hover:text-gray-700;
+	}
+	.tab.active {
+		@apply flex items-center border-r border-gray-200 bg-white px-4 text-center font-semibold;
+	}
+	.tab-alternate {
+		@apply flex h-full items-center whitespace-nowrap border-b-2 border-transparent px-2.5 font-medium text-gray-600 dark:text-gray-400 sm:px-3.5;
+	}
+	.tab-alternate:not(.active) {
+		@apply hover:border-gray-200 dark:hover:border-gray-800;
+	}
+	.tab-alternate.active {
+		@apply border-gray-700 font-semibold text-gray-800 dark:border-gray-400;
+	}
+	.tag {
+		@apply mr-1 mb-1 inline-flex h-7 max-w-full flex-none items-center overflow-hidden truncate rounded-lg border border-transparent bg-gradient-to-b text-sm dark:border-gray-900 md:mr-1.5 md:mb-1.5;
+	}
+	.tag > span {
+		@apply px-2;
+	}
+	.tag.inactive {
+		@apply filter-grayscale opacity-50;
+	}
+	.tag-blue {
+		@apply from-blue-50 to-blue-50 text-blue-800 hover:to-blue-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-ghost {
+		@apply from-transparent to-transparent text-gray-400 hover:from-gray-100 hover:to-gray-100 hover:text-gray-600;
+	}
+	.tag-green {
+		@apply from-green-50 to-green-50 text-green-800 hover:to-green-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-indigo {
+		@apply from-indigo-50 to-indigo-50 text-indigo-800 hover:to-indigo-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-orange {
+		@apply from-orange-50 to-orange-50 text-orange-800 hover:to-orange-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-purple {
+		@apply from-purple-50 to-purple-50 text-purple-800 hover:to-purple-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-red {
+		@apply from-red-50 to-red-50 text-red-800 hover:to-red-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-yellow {
+		@apply from-yellow-50 text-yellow-800 hover:to-yellow-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-white {
+		@apply border-gray-100 from-white to-white text-gray-700 hover:to-gray-100 dark:border-gray-900 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950;
+	}
+	.tag-ico {
+		@apply flex h-7 w-8 flex-none items-center bg-gradient-to-t to-white pl-2 dark:to-gray-950;
+	}
+	.tag-ico-blue {
+		@apply from-blue-50 text-blue-500 dark:from-gray-925;
+	}
+	.tag-ico-green {
+		@apply from-green-50 text-green-500 dark:from-gray-925;
+	}
+	.tag-ico-indigo {
+		@apply from-indigo-50 text-indigo-500 dark:from-gray-925;
+	}
+	.tag-ico-orange {
+		@apply from-orange-50 text-orange-500 dark:from-gray-925;
+	}
+	.tag-ico-purple {
+		@apply from-purple-50 text-purple-500 dark:from-gray-925;
+	}
+	.tag-ico-red {
+		@apply from-red-50 text-red-500 dark:from-gray-925;
+	}
+	.tag-ico-yellow {
+		@apply from-yellow-50 text-yellow-500 dark:from-gray-925;
+	}
+	.form-input:not([type="checkbox"]) {
+		@apply border-2 border-gray-200 shadow-sm
+			focus:border-blue-300 focus:ring focus:ring-blue-200
+			focus:ring-opacity-50 dark:border-gray-700 dark:bg-gray-950;
+	}
+	.form-input:not([type="radio"]):not([type="checkbox"]) {
+		@apply mt-1 block w-full rounded-md;
+	}
+	.form-input[type="radio"] {
+		@apply mt-2 mr-1.5 h-3.5 w-3.5;
+	}
+	.form-input[type="checkbox"] {
+		@apply rounded border-transparent bg-gray-200 text-blue-500 focus:border-transparent focus:ring-1 focus:ring-gray-200 focus:ring-offset-2;
+	}
+	.form-input[type="checkbox"]:checked {
+		@apply bg-blue-500;
+	}
+	.form-input:disabled {
+		@apply cursor-not-allowed opacity-50;
+	}
+	.form-input-alt {
+		@apply h-10 rounded-lg border border-gray-200 px-3 placeholder-gray-400 shadow-inner outline-none focus:shadow-inner focus:ring-1 focus:ring-inset focus:ring-indigo-200 dark:bg-gray-925 dark:focus:ring-indigo-50;
+	}
+}
+
+@tailwind utilities;
+
+@layer utilities {
+	.filter-none {
+		filter: none;
+	}
+	.filter-grayscale {
+		filter: grayscale(100%);
+	}
+	.from-gray-50-to-white {
+		@apply from-gray-50 to-white dark:from-gray-925 dark:to-gray-950;
+	}
+
+	.from-gray-100-to-white {
+		@apply from-gray-100 to-white dark:from-gray-925 dark:to-gray-925;
+	}
+	.min-h-main {
+		min-height: calc(100vh - theme(spacing.16) - 1px);
+	}
+}
+.alert {
+	@apply rounded-md border border-blue-100 bg-blue-50 py-2 px-3 text-blue-900 dark:border-blue-700 dark:bg-blue-800 dark:text-blue-200;
+}
+.alert a {
+	@apply underline;
+}
+.alert-error {
+	@apply border-red-100 bg-red-50 text-red-900 dark:border-red-700 dark:bg-red-800 dark:text-red-200;
+}
+.alert-success {
+	@apply border-green-100 bg-green-50 text-green-900;
+}
+.alert-warning {
+	@apply border-yellow-100 bg-yellow-50 text-yellow-900;
+}
diff --git a/packages/widgets/static/audioProcessor.js b/packages/widgets/static/audioProcessor.js
new file mode 100644
index 0000000000000000000000000000000000000000..f1cde75a59d5cbfdc31c9704f30f0a8f66c65647
--- /dev/null
+++ b/packages/widgets/static/audioProcessor.js
@@ -0,0 +1,39 @@
+// for packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts
+class AudioDataExtractor extends AudioWorkletProcessor {
+	_updateIntervalInMS;
+	_sampleInFrames;
+	_index;
+	_buffer;
+
+	constructor() {
+		super();
+		this._updateIntervalInMS = 50;
+		this._sampleInFrames = Math.floor((this._updateIntervalInMS / 1000.0) * sampleRate);
+		this._index = 0;
+		this._buffer = new Float32Array(this._sampleInFrames);
+	}
+
+	process(inputs, outputs, parameters) {
+		// Note that the input will be down-mixed to mono; however, if no inputs are
+		// connected then zero channels will be passed in.
+		if (inputs.length > 0 && inputs[0].length > 0) {
+			const rest = this._buffer.length - this._index;
+			if (rest < inputs[0][0].length) {
+				this._buffer.set(inputs[0][0].slice(0, rest), this._index);
+				this.port.postMessage({
+					buffer: this._buffer.slice(0),
+					sampling_rate: sampleRate,
+				});
+				this._buffer.set(inputs[0][0].slice(rest), 0); // carry the remainder into the next buffer
+				this._index = inputs[0][0].length - rest;
+			} else {
+				this._buffer.set(inputs[0][0], this._index);
+				this._index += inputs[0][0].length;
+			}
+		}
+
+		return true;
+	}
+}
+
+registerProcessor("AudioDataExtractor", AudioDataExtractor);
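
The processor above only covers the worklet side: it accumulates roughly 50 ms of mono samples and posts `{ buffer, sampling_rate }` to the main thread. The consumer (the `Recorder.ts` referenced in the header comment) has to load and connect it; a minimal sketch of that wiring, using only standard Web Audio APIs, could be:

```ts
// Sketch of the main-thread side (assumed setup, not the actual Recorder.ts).
async function startCapture(onChunk: (samples: Float32Array, samplingRate: number) => void) {
	const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
	const ctx = new AudioContext();
	await ctx.audioWorklet.addModule("/audioProcessor.js"); // served from static/
	const node = new AudioWorkletNode(ctx, "AudioDataExtractor");
	node.port.onmessage = (event) => {
		// one message per ~50 ms chunk, matching the processor's postMessage payload
		onChunk(event.data.buffer, event.data.sampling_rate);
	};
	ctx.createMediaStreamSource(stream).connect(node);
}
```
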
diff --git a/packages/widgets/static/cats.jpg b/packages/widgets/static/cats.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..e131e8ecdf32c3f751ab0f7b2e5f002683babda2
Binary files /dev/null and b/packages/widgets/static/cats.jpg differ
diff --git a/packages/widgets/static/favicon.png b/packages/widgets/static/favicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..825b9e65af7c104cfb07089bb28659393b4f2097
Binary files /dev/null and b/packages/widgets/static/favicon.png differ
diff --git a/packages/widgets/svelte.config.js b/packages/widgets/svelte.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..456afa84b8259c237e5904b2e5db71ff8f338b15
--- /dev/null
+++ b/packages/widgets/svelte.config.js
@@ -0,0 +1,18 @@
+import adapter from "@sveltejs/adapter-auto";
+import { vitePreprocess } from "@sveltejs/kit/vite";
+
+/** @type {import('@sveltejs/kit').Config} */
+const config = {
+	// Consult https://kit.svelte.dev/docs/integrations#preprocessors
+	// for more information about preprocessors
+	preprocess: vitePreprocess(),
+
+	kit: {
+		// adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list.
+		// If your environment is not supported or you settled on a specific environment, switch out the adapter.
+		// See https://kit.svelte.dev/docs/adapters for more information about adapters.
+		adapter: adapter(),
+	},
+};
+
+export default config;
diff --git a/packages/widgets/tailwind.config.cjs b/packages/widgets/tailwind.config.cjs
new file mode 100644
index 0000000000000000000000000000000000000000..6853e61fadeefe1205b529a95bbf6a9065b3d464
--- /dev/null
+++ b/packages/widgets/tailwind.config.cjs
@@ -0,0 +1,43 @@
+// eslint-disable-next-line @typescript-eslint/no-var-requires
+const defaultTheme = require("tailwindcss/defaultTheme");
+// eslint-disable-next-line @typescript-eslint/no-var-requires
+const colors = require("tailwindcss/colors");
+
+module.exports = {
+	content: ["./src/**/*.{html,js,svelte,ts}"],
+	darkMode: "class",
+	theme: {
+		container: {
+			center: true,
+			padding: { DEFAULT: "1rem" },
+		},
+		extend: {
+			colors: {
+				green: colors.emerald,
+				yellow: colors.amber,
+				purple: colors.violet,
+				gray: {
+					350: "#b3bcc9",
+					// Dark blue
+					// 925: '#131f3d',
+					// 950: '#0a1226',
+					// Darker
+					850: "#141c2e",
+					925: "#101623",
+					950: "#0b0f19",
+					// Darkest
+					// 925: '#081122',
+					// 950: '#000511',
+				},
+			},
+			gridTemplateRows: {
+				full: "100%",
+			},
+			fontFamily: {
+				sans: ["Source Sans Pro", ...defaultTheme.fontFamily.sans],
+				mono: ["IBM Plex Mono", ...defaultTheme.fontFamily.mono],
+			},
+		},
+	},
+	plugins: [require("@tailwindcss/forms")],
+};
diff --git a/packages/widgets/tsconfig.json b/packages/widgets/tsconfig.json
new file mode 100644
index 0000000000000000000000000000000000000000..6f788f1603f23e76c22eddfabc3c45a58471d27f
--- /dev/null
+++ b/packages/widgets/tsconfig.json
@@ -0,0 +1,15 @@
+{
+	"extends": "./.svelte-kit/tsconfig.json",
+	"compilerOptions": {
+		"allowJs": true,
+		"checkJs": true,
+		"esModuleInterop": true,
+		"forceConsistentCasingInFileNames": true,
+		"resolveJsonModule": true,
+		"skipLibCheck": true,
+		"sourceMap": true,
+		"strict": true,
+		"module": "NodeNext",
+		"moduleResolution": "NodeNext"
+	}
+}
diff --git a/packages/widgets/vite.config.ts b/packages/widgets/vite.config.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6b9eb5d390d85bd47250c16bd31672968cd3ca4b
--- /dev/null
+++ b/packages/widgets/vite.config.ts
@@ -0,0 +1,6 @@
+import { sveltekit } from "@sveltejs/kit/vite";
+import { defineConfig } from "vite";
+
+export default defineConfig({
+	plugins: [sveltekit()],
+});
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26d255be45d5a42290df94e32b24093d97af1083
--- /dev/null
+++ b/pnpm-lock.yaml
@@ -0,0 +1,4252 @@
+lockfileVersion: '6.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+dependencies:
+  '@typescript-eslint/eslint-plugin':
+    specifier: ^5.51.0
+    version: 5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.54.0)(typescript@5.3.2)
+  '@typescript-eslint/parser':
+    specifier: ^5.51.0
+    version: 5.62.0(eslint@8.54.0)(typescript@5.3.2)
+  eslint:
+    specifier: ^8.35.0
+    version: 8.54.0
+  eslint-config-prettier:
+    specifier: ^9.0.0
+    version: 9.0.0(eslint@8.54.0)
+  eslint-plugin-prettier:
+    specifier: ^4.2.1
+    version: 4.2.1(eslint-config-prettier@9.0.0)(eslint@8.54.0)(prettier@3.1.0)
+  eslint-plugin-svelte:
+    specifier: ^2.30.0
+    version: 2.35.1(eslint@8.54.0)(svelte@4.2.7)(ts-node@10.9.1)
+  prettier:
+    specifier: ^3.0.0
+    version: 3.1.0
+  prettier-plugin-svelte:
+    specifier: ^3.0.0
+    version: 3.1.1(prettier@3.1.0)(svelte@4.2.7)
+  typescript:
+    specifier: ^5.0.0
+    version: 5.3.2
+  vite:
+    specifier: 4.1.4
+    version: 4.1.4(@types/node@20.9.3)
+
+devDependencies:
+  '@vitest/browser':
+    specifier: ^0.29.7
+    version: 0.29.8(vitest@0.29.8)
+  semver:
+    specifier: ^7.5.0
+    version: 7.5.4
+  ts-node:
+    specifier: ^10.9.1
+    version: 10.9.1(@types/node@20.9.3)(typescript@5.3.2)
+  tsup:
+    specifier: ^6.7.0
+    version: 6.7.0(postcss@8.4.31)(ts-node@10.9.1)(typescript@5.3.2)
+  vitest:
+    specifier: ^0.29.4
+    version: 0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4)
+  webdriverio:
+    specifier: ^8.6.7
+    version: 8.23.4(typescript@5.3.2)
+
+packages:
+
+  /@aashutoshrathi/word-wrap@1.2.6:
+    resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==}
+    engines: {node: '>=0.10.0'}
+    dev: false
+
+  /@ampproject/remapping@2.2.1:
+    resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      '@jridgewell/gen-mapping': 0.3.3
+      '@jridgewell/trace-mapping': 0.3.20
+    dev: false
+
+  /@cspotcode/source-map-support@0.8.1:
+    resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==}
+    engines: {node: '>=12'}
+    dependencies:
+      '@jridgewell/trace-mapping': 0.3.9
+
+  /@esbuild/android-arm64@0.16.17:
+    resolution: {integrity: sha512-MIGl6p5sc3RDTLLkYL1MyL8BMRN4tLMRCn+yRJJmEDvYZ2M7tmAf80hx1kbNEUX2KJ50RRtxZ4JHLvCfuB6kBg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/android-arm64@0.17.19:
+    resolution: {integrity: sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-arm@0.16.17:
+    resolution: {integrity: sha512-N9x1CMXVhtWEAMS7pNNONyA14f71VPQN9Cnavj1XQh6T7bskqiLLrSca4O0Vr8Wdcga943eThxnVp3JLnBMYtw==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [android]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/android-arm@0.17.19:
+    resolution: {integrity: sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/android-x64@0.16.17:
+    resolution: {integrity: sha512-a3kTv3m0Ghh4z1DaFEuEDfz3OLONKuFvI4Xqczqx4BqLyuFaFkuaG4j2MtA6fuWEFeC5x9IvqnX7drmRq/fyAQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [android]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/android-x64@0.17.19:
+    resolution: {integrity: sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-arm64@0.16.17:
+    resolution: {integrity: sha512-/2agbUEfmxWHi9ARTX6OQ/KgXnOWfsNlTeLcoV7HSuSTv63E4DqtAc+2XqGw1KHxKMHGZgbVCZge7HXWX9Vn+w==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/darwin-arm64@0.17.19:
+    resolution: {integrity: sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/darwin-x64@0.16.17:
+    resolution: {integrity: sha512-2By45OBHulkd9Svy5IOCZt376Aa2oOkiE9QWUK9fe6Tb+WDr8hXL3dpqi+DeLiMed8tVXspzsTAvd0jUl96wmg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/darwin-x64@0.17.19:
+    resolution: {integrity: sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-arm64@0.16.17:
+    resolution: {integrity: sha512-mt+cxZe1tVx489VTb4mBAOo2aKSnJ33L9fr25JXpqQqzbUIw/yzIzi+NHwAXK2qYV1lEFp4OoVeThGjUbmWmdw==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [freebsd]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/freebsd-arm64@0.17.19:
+    resolution: {integrity: sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/freebsd-x64@0.16.17:
+    resolution: {integrity: sha512-8ScTdNJl5idAKjH8zGAsN7RuWcyHG3BAvMNpKOBaqqR7EbUhhVHOqXRdL7oZvz8WNHL2pr5+eIT5c65kA6NHug==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [freebsd]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/freebsd-x64@0.17.19:
+    resolution: {integrity: sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm64@0.16.17:
+    resolution: {integrity: sha512-7S8gJnSlqKGVJunnMCrXHU9Q8Q/tQIxk/xL8BqAP64wchPCTzuM6W3Ra8cIa1HIflAvDnNOt2jaL17vaW+1V0g==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-arm64@0.17.19:
+    resolution: {integrity: sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-arm@0.16.17:
+    resolution: {integrity: sha512-iihzrWbD4gIT7j3caMzKb/RsFFHCwqqbrbH9SqUSRrdXkXaygSZCZg1FybsZz57Ju7N/SHEgPyaR0LZ8Zbe9gQ==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-arm@0.17.19:
+    resolution: {integrity: sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ia32@0.16.17:
+    resolution: {integrity: sha512-kiX69+wcPAdgl3Lonh1VI7MBr16nktEvOfViszBSxygRQqSpzv7BffMKRPMFwzeJGPxcio0pdD3kYQGpqQ2SSg==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-ia32@0.17.19:
+    resolution: {integrity: sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-loong64@0.14.54:
+    resolution: {integrity: sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==}
+    engines: {node: '>=12'}
+    cpu: [loong64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-loong64@0.16.17:
+    resolution: {integrity: sha512-dTzNnQwembNDhd654cA4QhbS9uDdXC3TKqMJjgOWsC0yNCbpzfWoXdZvp0mY7HU6nzk5E0zpRGGx3qoQg8T2DQ==}
+    engines: {node: '>=12'}
+    cpu: [loong64]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-loong64@0.17.19:
+    resolution: {integrity: sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==}
+    engines: {node: '>=12'}
+    cpu: [loong64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-mips64el@0.16.17:
+    resolution: {integrity: sha512-ezbDkp2nDl0PfIUn0CsQ30kxfcLTlcx4Foz2kYv8qdC6ia2oX5Q3E/8m6lq84Dj/6b0FrkgD582fJMIfHhJfSw==}
+    engines: {node: '>=12'}
+    cpu: [mips64el]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-mips64el@0.17.19:
+    resolution: {integrity: sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==}
+    engines: {node: '>=12'}
+    cpu: [mips64el]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-ppc64@0.16.17:
+    resolution: {integrity: sha512-dzS678gYD1lJsW73zrFhDApLVdM3cUF2MvAa1D8K8KtcSKdLBPP4zZSLy6LFZ0jYqQdQ29bjAHJDgz0rVbLB3g==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-ppc64@0.17.19:
+    resolution: {integrity: sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-riscv64@0.16.17:
+    resolution: {integrity: sha512-ylNlVsxuFjZK8DQtNUwiMskh6nT0vI7kYl/4fZgV1llP5d6+HIeL/vmmm3jpuoo8+NuXjQVZxmKuhDApK0/cKw==}
+    engines: {node: '>=12'}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-riscv64@0.17.19:
+    resolution: {integrity: sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==}
+    engines: {node: '>=12'}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-s390x@0.16.17:
+    resolution: {integrity: sha512-gzy7nUTO4UA4oZ2wAMXPNBGTzZFP7mss3aKR2hH+/4UUkCOyqmjXiKpzGrY2TlEUhbbejzXVKKGazYcQTZWA/w==}
+    engines: {node: '>=12'}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-s390x@0.17.19:
+    resolution: {integrity: sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==}
+    engines: {node: '>=12'}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/linux-x64@0.16.17:
+    resolution: {integrity: sha512-mdPjPxfnmoqhgpiEArqi4egmBAMYvaObgn4poorpUaqmvzzbvqbowRllQ+ZgzGVMGKaPkqUmPDOOFQRUFDmeUw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/linux-x64@0.17.19:
+    resolution: {integrity: sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/netbsd-x64@0.16.17:
+    resolution: {integrity: sha512-/PzmzD/zyAeTUsduZa32bn0ORug+Jd1EGGAUJvqfeixoEISYpGnAezN6lnJoskauoai0Jrs+XSyvDhppCPoKOA==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [netbsd]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/netbsd-x64@0.17.19:
+    resolution: {integrity: sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [netbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/openbsd-x64@0.16.17:
+    resolution: {integrity: sha512-2yaWJhvxGEz2RiftSk0UObqJa/b+rIAjnODJgv2GbGGpRwAfpgzyrg1WLK8rqA24mfZa9GvpjLcBBg8JHkoodg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [openbsd]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/openbsd-x64@0.17.19:
+    resolution: {integrity: sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [openbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/sunos-x64@0.16.17:
+    resolution: {integrity: sha512-xtVUiev38tN0R3g8VhRfN7Zl42YCJvyBhRKw1RJjwE1d2emWTVToPLNEQj/5Qxc6lVFATDiy6LjVHYhIPrLxzw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [sunos]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/sunos-x64@0.17.19:
+    resolution: {integrity: sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [sunos]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-arm64@0.16.17:
+    resolution: {integrity: sha512-ga8+JqBDHY4b6fQAmOgtJJue36scANy4l/rL97W+0wYmijhxKetzZdKOJI7olaBaMhWt8Pac2McJdZLxXWUEQw==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/win32-arm64@0.17.19:
+    resolution: {integrity: sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-ia32@0.16.17:
+    resolution: {integrity: sha512-WnsKaf46uSSF/sZhwnqE4L/F89AYNMiD4YtEcYekBt9Q7nj0DiId2XH2Ng2PHM54qi5oPrQ8luuzGszqi/veig==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/win32-ia32@0.17.19:
+    resolution: {integrity: sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@esbuild/win32-x64@0.16.17:
+    resolution: {integrity: sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    optional: true
+
+  /@esbuild/win32-x64@0.17.19:
+    resolution: {integrity: sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@eslint-community/eslint-utils@4.4.0(eslint@8.54.0):
+    resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      eslint: ^6.0.0 || ^7.0.0 || >=8.0.0
+    dependencies:
+      eslint: 8.54.0
+      eslint-visitor-keys: 3.4.3
+    dev: false
+
+  /@eslint-community/regexpp@4.10.0:
+    resolution: {integrity: sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==}
+    engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0}
+    dev: false
+
+  /@eslint/eslintrc@2.1.3:
+    resolution: {integrity: sha512-yZzuIG+jnVu6hNSzFEN07e8BxF3uAzYtQb6uDkaYZLo6oYZDCq454c5kB8zxnzfCYyP4MIuyBn10L0DqwujTmA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      ajv: 6.12.6
+      debug: 4.3.4
+      espree: 9.6.1
+      globals: 13.23.0
+      ignore: 5.3.0
+      import-fresh: 3.3.0
+      js-yaml: 4.1.0
+      minimatch: 3.1.2
+      strip-json-comments: 3.1.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@eslint/js@8.54.0:
+    resolution: {integrity: sha512-ut5V+D+fOoWPgGGNj83GGjnntO39xDy6DWxO0wb7Jp3DcMX0TfIqdzHF85VTQkerdyGmuuMD9AKAo5KiNlf/AQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dev: false
+
+  /@humanwhocodes/config-array@0.11.13:
+    resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==}
+    engines: {node: '>=10.10.0'}
+    dependencies:
+      '@humanwhocodes/object-schema': 2.0.1
+      debug: 4.3.4
+      minimatch: 3.1.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@humanwhocodes/module-importer@1.0.1:
+    resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==}
+    engines: {node: '>=12.22'}
+    dev: false
+
+  /@humanwhocodes/object-schema@2.0.1:
+    resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==}
+    dev: false
+
+  /@isaacs/cliui@8.0.2:
+    resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
+    engines: {node: '>=12'}
+    dependencies:
+      string-width: 5.1.2
+      string-width-cjs: /string-width@4.2.3
+      strip-ansi: 7.1.0
+      strip-ansi-cjs: /strip-ansi@6.0.1
+      wrap-ansi: 8.1.0
+      wrap-ansi-cjs: /wrap-ansi@7.0.0
+    dev: true
+
+  /@jridgewell/gen-mapping@0.3.3:
+    resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      '@jridgewell/set-array': 1.1.2
+      '@jridgewell/sourcemap-codec': 1.4.15
+      '@jridgewell/trace-mapping': 0.3.20
+
+  /@jridgewell/resolve-uri@3.1.1:
+    resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==}
+    engines: {node: '>=6.0.0'}
+
+  /@jridgewell/set-array@1.1.2:
+    resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==}
+    engines: {node: '>=6.0.0'}
+
+  /@jridgewell/sourcemap-codec@1.4.15:
+    resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==}
+
+  /@jridgewell/trace-mapping@0.3.20:
+    resolution: {integrity: sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==}
+    dependencies:
+      '@jridgewell/resolve-uri': 3.1.1
+      '@jridgewell/sourcemap-codec': 1.4.15
+
+  /@jridgewell/trace-mapping@0.3.9:
+    resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==}
+    dependencies:
+      '@jridgewell/resolve-uri': 3.1.1
+      '@jridgewell/sourcemap-codec': 1.4.15
+
+  /@jspm/core@2.0.0-beta.24:
+    resolution: {integrity: sha512-a4Bo/80Z6CoJNor5ldgs6002utmmbttP4JYd/FJ0Ob2fVdf6O6ha5SORBCqrnDnBvMc1TlrHY7dCfat5+H0a6A==}
+    dev: true
+
+  /@nodelib/fs.scandir@2.1.5:
+    resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      run-parallel: 1.2.0
+
+  /@nodelib/fs.stat@2.0.5:
+    resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==}
+    engines: {node: '>= 8'}
+
+  /@nodelib/fs.walk@1.2.8:
+    resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==}
+    engines: {node: '>= 8'}
+    dependencies:
+      '@nodelib/fs.scandir': 2.1.5
+      fastq: 1.15.0
+
+  /@pkgjs/parseargs@0.11.0:
+    resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
+    engines: {node: '>=14'}
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /@polka/url@1.0.0-next.23:
+    resolution: {integrity: sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==}
+    dev: true
+
+  /@puppeteer/browsers@1.4.6(typescript@5.3.2):
+    resolution: {integrity: sha512-x4BEjr2SjOPowNeiguzjozQbsc6h437ovD/wu+JpaenxVLm3jkgzHY2xOslMTp50HoTvQreMjiexiGQw1sqZlQ==}
+    engines: {node: '>=16.3.0'}
+    hasBin: true
+    peerDependencies:
+      typescript: '>= 4.7.4'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      debug: 4.3.4
+      extract-zip: 2.0.1
+      progress: 2.0.3
+      proxy-agent: 6.3.0
+      tar-fs: 3.0.4
+      typescript: 5.3.2
+      unbzip2-stream: 1.4.3
+      yargs: 17.7.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@puppeteer/browsers@1.8.0:
+    resolution: {integrity: sha512-TkRHIV6k2D8OlUe8RtG+5jgOF/H98Myx0M6AOafC8DdNVOFiBSFa5cpRDtpm8LXOa9sVwe0+e6Q3FC56X/DZfg==}
+    engines: {node: '>=16.3.0'}
+    hasBin: true
+    dependencies:
+      debug: 4.3.4
+      extract-zip: 2.0.1
+      progress: 2.0.3
+      proxy-agent: 6.3.1
+      tar-fs: 3.0.4
+      unbzip2-stream: 1.4.3
+      yargs: 17.7.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@rollup/plugin-inject@4.0.4(rollup@2.79.1):
+    resolution: {integrity: sha512-4pbcU4J/nS+zuHk+c+OL3WtmEQhqxlZ9uqfjQMQDOHOPld7PsCd8k5LWs8h5wjwJN7MgnAn768F2sDxEP4eNFQ==}
+    peerDependencies:
+      rollup: ^1.20.0 || ^2.0.0
+    dependencies:
+      '@rollup/pluginutils': 3.1.0(rollup@2.79.1)
+      estree-walker: 2.0.2
+      magic-string: 0.25.9
+      rollup: 2.79.1
+    dev: true
+
+  /@rollup/pluginutils@3.1.0(rollup@2.79.1):
+    resolution: {integrity: sha512-GksZ6pr6TpIjHm8h9lSQ8pi8BE9VeubNT0OMJ3B5uZJ8pz73NPiqOtCog/x2/QzM1ENChPKxMDhiQuRHsqc+lg==}
+    engines: {node: '>= 8.0.0'}
+    peerDependencies:
+      rollup: ^1.20.0||^2.0.0
+    dependencies:
+      '@types/estree': 0.0.39
+      estree-walker: 1.0.1
+      picomatch: 2.3.1
+      rollup: 2.79.1
+    dev: true
+
+  /@sindresorhus/is@5.6.0:
+    resolution: {integrity: sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==}
+    engines: {node: '>=14.16'}
+    dev: true
+
+  /@szmarczak/http-timer@5.0.1:
+    resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==}
+    engines: {node: '>=14.16'}
+    dependencies:
+      defer-to-connect: 2.0.1
+    dev: true
+
+  /@tootallnate/quickjs-emscripten@0.23.0:
+    resolution: {integrity: sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==}
+    dev: true
+
+  /@tsconfig/node10@1.0.9:
+    resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==}
+
+  /@tsconfig/node12@1.0.11:
+    resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==}
+
+  /@tsconfig/node14@1.0.3:
+    resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==}
+
+  /@tsconfig/node16@1.0.4:
+    resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==}
+
+  /@types/chai-subset@1.3.5:
+    resolution: {integrity: sha512-c2mPnw+xHtXDoHmdtcCXGwyLMiauiAyxWMzhGpqHC4nqI/Y5G2XhTampslK2rb59kpcuHon03UH8W6iYUzw88A==}
+    dependencies:
+      '@types/chai': 4.3.11
+    dev: true
+
+  /@types/chai@4.3.11:
+    resolution: {integrity: sha512-qQR1dr2rGIHYlJulmr8Ioq3De0Le9E4MJ5AiaeAETJJpndT1uUNHsGFK3L/UIu+rbkQSdj8J/w2bCsBZc/Y5fQ==}
+    dev: true
+
+  /@types/estree@0.0.39:
+    resolution: {integrity: sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==}
+    dev: true
+
+  /@types/estree@1.0.5:
+    resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==}
+    dev: false
+
+  /@types/http-cache-semantics@4.0.4:
+    resolution: {integrity: sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==}
+    dev: true
+
+  /@types/json-schema@7.0.15:
+    resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==}
+    dev: false
+
+  /@types/node@20.9.3:
+    resolution: {integrity: sha512-nk5wXLAXGBKfrhLB0cyHGbSqopS+nz0BUgZkUQqSHSSgdee0kssp1IAqlQOu333bW+gMNs2QREx7iynm19Abxw==}
+    dependencies:
+      undici-types: 5.26.5
+
+  /@types/semver@7.5.6:
+    resolution: {integrity: sha512-dn1l8LaMea/IjDoHNd9J52uBbInB796CDffS6VdIxvqYCPSG0V0DzHp76GpaWnlhg88uYyPbXCDIowa86ybd5A==}
+    dev: false
+
+  /@types/which@2.0.2:
+    resolution: {integrity: sha512-113D3mDkZDjo+EeUEHCFy0qniNc1ZpecGiAU7WSo7YDoSzolZIQKpYFHrPpjkB2nuyahcKfrmLXeQlh7gqJYdw==}
+    dev: true
+
+  /@types/ws@8.5.10:
+    resolution: {integrity: sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==}
+    dependencies:
+      '@types/node': 20.9.3
+    dev: true
+
+  /@types/yauzl@2.10.3:
+    resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==}
+    requiresBuild: true
+    dependencies:
+      '@types/node': 20.9.3
+    dev: true
+    optional: true
+
+  /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.54.0)(typescript@5.3.2):
+    resolution: {integrity: sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      '@typescript-eslint/parser': ^5.0.0
+      eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
+      typescript: '*'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@eslint-community/regexpp': 4.10.0
+      '@typescript-eslint/parser': 5.62.0(eslint@8.54.0)(typescript@5.3.2)
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/type-utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2)
+      '@typescript-eslint/utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2)
+      debug: 4.3.4
+      eslint: 8.54.0
+      graphemer: 1.4.0
+      ignore: 5.3.0
+      natural-compare-lite: 1.4.0
+      semver: 7.5.4
+      tsutils: 3.21.0(typescript@5.3.2)
+      typescript: 5.3.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@typescript-eslint/parser@5.62.0(eslint@8.54.0)(typescript@5.3.2):
+    resolution: {integrity: sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
+      typescript: '*'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2)
+      debug: 4.3.4
+      eslint: 8.54.0
+      typescript: 5.3.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@typescript-eslint/scope-manager@5.62.0:
+    resolution: {integrity: sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/visitor-keys': 5.62.0
+    dev: false
+
+  /@typescript-eslint/type-utils@5.62.0(eslint@8.54.0)(typescript@5.3.2):
+    resolution: {integrity: sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      eslint: '*'
+      typescript: '*'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2)
+      '@typescript-eslint/utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2)
+      debug: 4.3.4
+      eslint: 8.54.0
+      tsutils: 3.21.0(typescript@5.3.2)
+      typescript: 5.3.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@typescript-eslint/types@5.62.0:
+    resolution: {integrity: sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dev: false
+
+  /@typescript-eslint/typescript-estree@5.62.0(typescript@5.3.2):
+    resolution: {integrity: sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      typescript: '*'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/visitor-keys': 5.62.0
+      debug: 4.3.4
+      globby: 11.1.0
+      is-glob: 4.0.3
+      semver: 7.5.4
+      tsutils: 3.21.0(typescript@5.3.2)
+      typescript: 5.3.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /@typescript-eslint/utils@5.62.0(eslint@8.54.0)(typescript@5.3.2):
+    resolution: {integrity: sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      eslint: ^6.0.0 || ^7.0.0 || ^8.0.0
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0)
+      '@types/json-schema': 7.0.15
+      '@types/semver': 7.5.6
+      '@typescript-eslint/scope-manager': 5.62.0
+      '@typescript-eslint/types': 5.62.0
+      '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2)
+      eslint: 8.54.0
+      eslint-scope: 5.1.1
+      semver: 7.5.4
+    transitivePeerDependencies:
+      - supports-color
+      - typescript
+    dev: false
+
+  /@typescript-eslint/visitor-keys@5.62.0:
+    resolution: {integrity: sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      '@typescript-eslint/types': 5.62.0
+      eslint-visitor-keys: 3.4.3
+    dev: false
+
+  /@ungap/structured-clone@1.2.0:
+    resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==}
+    dev: false
+
+  /@vitest/browser@0.29.8(vitest@0.29.8):
+    resolution: {integrity: sha512-EEZV9GOOVrNaSE1eg41e3mUFzRVnfvemJl2aDIkVHqvFWuueMe9MFel+KipXyLUGTxmq40q++3wTkEX58rhVHA==}
+    peerDependencies:
+      vitest: '>=0.29.4'
+    dependencies:
+      '@vitest/runner': 0.29.8
+      local-pkg: 0.4.3
+      mlly: 1.4.2
+      modern-node-polyfills: 0.1.0
+      rollup-plugin-node-polyfills: 0.2.1
+      sirv: 2.0.3
+      vitest: 0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4)
+    dev: true
+
+  /@vitest/expect@0.29.8:
+    resolution: {integrity: sha512-xlcVXn5I5oTq6NiZSY3ykyWixBxr5mG8HYtjvpgg6KaqHm0mvhX18xuwl5YGxIRNt/A5jidd7CWcNHrSvgaQqQ==}
+    dependencies:
+      '@vitest/spy': 0.29.8
+      '@vitest/utils': 0.29.8
+      chai: 4.3.10
+    dev: true
+
+  /@vitest/runner@0.29.8:
+    resolution: {integrity: sha512-FzdhnRDwEr/A3Oo1jtIk/B952BBvP32n1ObMEb23oEJNO+qO5cBet6M2XWIDQmA7BDKGKvmhUf2naXyp/2JEwQ==}
+    dependencies:
+      '@vitest/utils': 0.29.8
+      p-limit: 4.0.0
+      pathe: 1.1.1
+    dev: true
+
+  /@vitest/spy@0.29.8:
+    resolution: {integrity: sha512-VdjBe9w34vOMl5I5mYEzNX8inTxrZ+tYUVk9jxaZJmHFwmDFC/GV3KBFTA/JKswr3XHvZL+FE/yq5EVhb6pSAw==}
+    dependencies:
+      tinyspy: 1.1.1
+    dev: true
+
+  /@vitest/utils@0.29.8:
+    resolution: {integrity: sha512-qGzuf3vrTbnoY+RjjVVIBYfuWMjn3UMUqyQtdGNZ6ZIIyte7B37exj6LaVkrZiUTvzSadVvO/tJm8AEgbGCBPg==}
+    dependencies:
+      cli-truncate: 3.1.0
+      diff: 5.1.0
+      loupe: 2.3.7
+      pretty-format: 27.5.1
+    dev: true
+
+  /@wdio/config@8.23.1:
+    resolution: {integrity: sha512-MljMBvMr+QYoy4/FytFHWorFE3CrBdEWuroOaGzC/0gkVOcHRO4nOy2rKahdcPXJAuxFwJNqqHhBPj+4tWiz9w==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      '@wdio/logger': 8.16.17
+      '@wdio/types': 8.23.1
+      '@wdio/utils': 8.23.1
+      decamelize: 6.0.0
+      deepmerge-ts: 5.1.0
+      glob: 10.3.10
+      import-meta-resolve: 3.1.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /@wdio/logger@8.16.17:
+    resolution: {integrity: sha512-zeQ41z3T+b4IsrriZZipayXxLNDuGsm7TdExaviNGojPVrIsQUCSd/FvlLHM32b7ZrMyInHenu/zx1cjAZO71g==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      chalk: 5.3.0
+      loglevel: 1.8.1
+      loglevel-plugin-prefix: 0.8.4
+      strip-ansi: 7.1.0
+    dev: true
+
+  /@wdio/protocols@8.23.0:
+    resolution: {integrity: sha512-2XTzD+lqQP3g8BWn+Bn5BTFzjHqzZNwq7DjlYrb27Bq8nOA+1DEcj3WzQ6V6CktTnKI/LAYKA1IFAF//Azrp/Q==}
+    dev: true
+
+  /@wdio/repl@8.23.1:
+    resolution: {integrity: sha512-u6zG2cgBm67V5/WlQzadWqLGXs3moH8MOsgoljULQncelSBBZGZ5DyLB4p7jKcUAsKtMjgmFQmIvpQoqmyvdfg==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      '@types/node': 20.9.3
+    dev: true
+
+  /@wdio/types@8.23.1:
+    resolution: {integrity: sha512-ym3tWSUGvmKwQ9vNPQfcKvJwGNK/Fh3e5WloNj3zoaUTKgD0aJeFQ0+Dz6KGlNowA0j5VkcqTTXo+UZ3l4Cx9A==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      '@types/node': 20.9.3
+    dev: true
+
+  /@wdio/utils@8.23.1:
+    resolution: {integrity: sha512-VA47MOpt+7svHj3W9r+DUl3t73tJbjF7+ZXL0Lk7QLe79xevd+mPk+YmuTEepn+0MljJWAuqRCEKFG/HK77RNw==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      '@puppeteer/browsers': 1.8.0
+      '@wdio/logger': 8.16.17
+      '@wdio/types': 8.23.1
+      decamelize: 6.0.0
+      deepmerge-ts: 5.1.0
+      edgedriver: 5.3.8
+      geckodriver: 4.2.1
+      get-port: 7.0.0
+      got: 13.0.0
+      import-meta-resolve: 3.1.1
+      locate-app: 2.1.0
+      safaridriver: 0.1.0
+      split2: 4.2.0
+      wait-port: 1.1.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /acorn-jsx@5.3.2(acorn@8.11.2):
+    resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
+    peerDependencies:
+      acorn: ^6.0.0 || ^7.0.0 || ^8.0.0
+    dependencies:
+      acorn: 8.11.2
+    dev: false
+
+  /acorn-walk@8.3.0:
+    resolution: {integrity: sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==}
+    engines: {node: '>=0.4.0'}
+
+  /acorn@8.11.2:
+    resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==}
+    engines: {node: '>=0.4.0'}
+    hasBin: true
+
+  /agent-base@7.1.0:
+    resolution: {integrity: sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==}
+    engines: {node: '>= 14'}
+    dependencies:
+      debug: 4.3.4
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /ajv@6.12.6:
+    resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==}
+    dependencies:
+      fast-deep-equal: 3.1.3
+      fast-json-stable-stringify: 2.1.0
+      json-schema-traverse: 0.4.1
+      uri-js: 4.4.1
+    dev: false
+
+  /ansi-regex@5.0.1:
+    resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
+    engines: {node: '>=8'}
+
+  /ansi-regex@6.0.1:
+    resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /ansi-styles@4.3.0:
+    resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
+    engines: {node: '>=8'}
+    dependencies:
+      color-convert: 2.0.1
+
+  /ansi-styles@5.2.0:
+    resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /ansi-styles@6.2.1:
+    resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /any-promise@1.3.0:
+    resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==}
+    dev: true
+
+  /anymatch@3.1.3:
+    resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
+    engines: {node: '>= 8'}
+    dependencies:
+      normalize-path: 3.0.0
+      picomatch: 2.3.1
+    dev: true
+
+  /archiver-utils@4.0.1:
+    resolution: {integrity: sha512-Q4Q99idbvzmgCTEAAhi32BkOyq8iVI5EwdO0PmBDSGIzzjYNdcFn7Q7k3OzbLy4kLUPXfJtG6fO2RjftXbobBg==}
+    engines: {node: '>= 12.0.0'}
+    dependencies:
+      glob: 8.1.0
+      graceful-fs: 4.2.11
+      lazystream: 1.0.1
+      lodash: 4.17.21
+      normalize-path: 3.0.0
+      readable-stream: 3.6.2
+    dev: true
+
+  /archiver@6.0.1:
+    resolution: {integrity: sha512-CXGy4poOLBKptiZH//VlWdFuUC1RESbdZjGjILwBuZ73P7WkAUN0htfSfBq/7k6FRFlpu7bg4JOkj1vU9G6jcQ==}
+    engines: {node: '>= 12.0.0'}
+    dependencies:
+      archiver-utils: 4.0.1
+      async: 3.2.5
+      buffer-crc32: 0.2.13
+      readable-stream: 3.6.2
+      readdir-glob: 1.1.3
+      tar-stream: 3.1.6
+      zip-stream: 5.0.1
+    dev: true
+
+  /arg@4.1.3:
+    resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==}
+
+  /argparse@2.0.1:
+    resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
+    dev: false
+
+  /aria-query@5.3.0:
+    resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==}
+    dependencies:
+      dequal: 2.0.3
+
+  /array-union@2.1.0:
+    resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==}
+    engines: {node: '>=8'}
+
+  /assertion-error@1.1.0:
+    resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==}
+    dev: true
+
+  /ast-types@0.13.4:
+    resolution: {integrity: sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==}
+    engines: {node: '>=4'}
+    dependencies:
+      tslib: 2.6.2
+    dev: true
+
+  /async@3.2.5:
+    resolution: {integrity: sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==}
+    dev: true
+
+  /axobject-query@3.2.1:
+    resolution: {integrity: sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==}
+    dependencies:
+      dequal: 2.0.3
+    dev: false
+
+  /b4a@1.6.4:
+    resolution: {integrity: sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw==}
+    dev: true
+
+  /balanced-match@1.0.2:
+    resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
+
+  /base64-js@1.5.1:
+    resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
+    dev: true
+
+  /basic-ftp@5.0.3:
+    resolution: {integrity: sha512-QHX8HLlncOLpy54mh+k/sWIFd0ThmRqwe9ZjELybGZK+tZ8rUb9VO0saKJUROTbE+KhzDUT7xziGpGrW8Kmd+g==}
+    engines: {node: '>=10.0.0'}
+    dev: true
+
+  /big-integer@1.6.52:
+    resolution: {integrity: sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==}
+    engines: {node: '>=0.6'}
+    dev: true
+
+  /binary-extensions@2.2.0:
+    resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /binary@0.3.0:
+    resolution: {integrity: sha512-D4H1y5KYwpJgK8wk1Cue5LLPgmwHKYSChkbspQg5JtVuR5ulGckxfR62H3AE9UDkdMC8yyXlqYihuz3Aqg2XZg==}
+    dependencies:
+      buffers: 0.1.1
+      chainsaw: 0.1.0
+    dev: true
+
+  /bluebird@3.4.7:
+    resolution: {integrity: sha512-iD3898SR7sWVRHbiQv+sHUtHnMvC1o3nW5rAcqnq3uOn07DSAppZYUkIGslDz6gXC7HfunPe7YVBgoEJASPcHA==}
+    dev: true
+
+  /brace-expansion@1.1.11:
+    resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==}
+    dependencies:
+      balanced-match: 1.0.2
+      concat-map: 0.0.1
+
+  /brace-expansion@2.0.1:
+    resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==}
+    dependencies:
+      balanced-match: 1.0.2
+    dev: true
+
+  /braces@3.0.2:
+    resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==}
+    engines: {node: '>=8'}
+    dependencies:
+      fill-range: 7.0.1
+
+  /buffer-crc32@0.2.13:
+    resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==}
+    dev: true
+
+  /buffer-indexof-polyfill@1.0.2:
+    resolution: {integrity: sha512-I7wzHwA3t1/lwXQh+A5PbNvJxgfo5r3xulgpYDB5zckTu/Z9oUK9biouBKQUjEqzaz3HnAT6TYoovmE+GqSf7A==}
+    engines: {node: '>=0.10'}
+    dev: true
+
+  /buffer@5.7.1:
+    resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==}
+    dependencies:
+      base64-js: 1.5.1
+      ieee754: 1.2.1
+    dev: true
+
+  /buffers@0.1.1:
+    resolution: {integrity: sha512-9q/rDEGSb/Qsvv2qvzIzdluL5k7AaJOTrw23z9reQthrbF7is4CtlT0DXyO1oei2DCp4uojjzQ7igaSHp1kAEQ==}
+    engines: {node: '>=0.2.0'}
+    dev: true
+
+  /bundle-require@4.0.2(esbuild@0.17.19):
+    resolution: {integrity: sha512-jwzPOChofl67PSTW2SGubV9HBQAhhR2i6nskiOThauo9dzwDUgOWQScFVaJkjEfYX+UXiD+LEx8EblQMc2wIag==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    peerDependencies:
+      esbuild: '>=0.17'
+    dependencies:
+      esbuild: 0.17.19
+      load-tsconfig: 0.2.5
+    dev: true
+
+  /cac@6.7.14:
+    resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /cacheable-lookup@7.0.0:
+    resolution: {integrity: sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==}
+    engines: {node: '>=14.16'}
+    dev: true
+
+  /cacheable-request@10.2.14:
+    resolution: {integrity: sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==}
+    engines: {node: '>=14.16'}
+    dependencies:
+      '@types/http-cache-semantics': 4.0.4
+      get-stream: 6.0.1
+      http-cache-semantics: 4.1.1
+      keyv: 4.5.4
+      mimic-response: 4.0.0
+      normalize-url: 8.0.0
+      responselike: 3.0.0
+    dev: true
+
+  /callsites@3.1.0:
+    resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==}
+    engines: {node: '>=6'}
+    dev: false
+
+  /chai@4.3.10:
+    resolution: {integrity: sha512-0UXG04VuVbruMUYbJ6JctvH0YnC/4q3/AkT18q4NaITo91CUm0liMS9VqzT9vZhVQ/1eqPanMWjBM+Juhfb/9g==}
+    engines: {node: '>=4'}
+    dependencies:
+      assertion-error: 1.1.0
+      check-error: 1.0.3
+      deep-eql: 4.1.3
+      get-func-name: 2.0.2
+      loupe: 2.3.7
+      pathval: 1.1.1
+      type-detect: 4.0.8
+    dev: true
+
+  /chainsaw@0.1.0:
+    resolution: {integrity: sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ==}
+    dependencies:
+      traverse: 0.3.9
+    dev: true
+
+  /chalk@4.1.2:
+    resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
+    engines: {node: '>=10'}
+    dependencies:
+      ansi-styles: 4.3.0
+      supports-color: 7.2.0
+
+  /chalk@5.3.0:
+    resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
+    engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
+    dev: true
+
+  /check-error@1.0.3:
+    resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==}
+    dependencies:
+      get-func-name: 2.0.2
+    dev: true
+
+  /chokidar@3.5.3:
+    resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==}
+    engines: {node: '>= 8.10.0'}
+    dependencies:
+      anymatch: 3.1.3
+      braces: 3.0.2
+      glob-parent: 5.1.2
+      is-binary-path: 2.1.0
+      is-glob: 4.0.3
+      normalize-path: 3.0.0
+      readdirp: 3.6.0
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /chromium-bidi@0.4.16(devtools-protocol@0.0.1147663):
+    resolution: {integrity: sha512-7ZbXdWERxRxSwo3txsBjjmc/NLxqb1Bk30mRb0BMS4YIaiV6zvKZqL/UAH+DdqcDYayDWk2n/y8klkBDODrPvA==}
+    peerDependencies:
+      devtools-protocol: '*'
+    dependencies:
+      devtools-protocol: 0.0.1147663
+      mitt: 3.0.0
+    dev: true
+
+  /cli-truncate@3.1.0:
+    resolution: {integrity: sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      slice-ansi: 5.0.0
+      string-width: 5.1.2
+    dev: true
+
+  /cliui@8.0.1:
+    resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      string-width: 4.2.3
+      strip-ansi: 6.0.1
+      wrap-ansi: 7.0.0
+    dev: true
+
+  /code-red@1.0.4:
+    resolution: {integrity: sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==}
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+      '@types/estree': 1.0.5
+      acorn: 8.11.2
+      estree-walker: 3.0.3
+      periscopic: 3.1.0
+    dev: false
+
+  /color-convert@2.0.1:
+    resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==}
+    engines: {node: '>=7.0.0'}
+    dependencies:
+      color-name: 1.1.4
+
+  /color-name@1.1.4:
+    resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==}
+
+  /commander@4.1.1:
+    resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /commander@9.5.0:
+    resolution: {integrity: sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==}
+    engines: {node: ^12.20.0 || >=14}
+    dev: true
+
+  /compress-commons@5.0.1:
+    resolution: {integrity: sha512-MPh//1cERdLtqwO3pOFLeXtpuai0Y2WCd5AhtKxznqM7WtaMYaOEMSgn45d9D10sIHSfIKE603HlOp8OPGrvag==}
+    engines: {node: '>= 12.0.0'}
+    dependencies:
+      crc-32: 1.2.2
+      crc32-stream: 5.0.0
+      normalize-path: 3.0.0
+      readable-stream: 3.6.2
+    dev: true
+
+  /concat-map@0.0.1:
+    resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+
+  /core-util-is@1.0.3:
+    resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
+    dev: true
+
+  /crc-32@1.2.2:
+    resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==}
+    engines: {node: '>=0.8'}
+    hasBin: true
+    dev: true
+
+  /crc32-stream@5.0.0:
+    resolution: {integrity: sha512-B0EPa1UK+qnpBZpG+7FgPCu0J2ETLpXq09o9BkLkEAhdB6Z61Qo4pJ3JYu0c+Qi+/SAL7QThqnzS06pmSSyZaw==}
+    engines: {node: '>= 12.0.0'}
+    dependencies:
+      crc-32: 1.2.2
+      readable-stream: 3.6.2
+    dev: true
+
+  /create-require@1.1.1:
+    resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==}
+
+  /cross-fetch@4.0.0:
+    resolution: {integrity: sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==}
+    dependencies:
+      node-fetch: 2.7.0
+    transitivePeerDependencies:
+      - encoding
+    dev: true
+
+  /cross-spawn@7.0.3:
+    resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
+    engines: {node: '>= 8'}
+    dependencies:
+      path-key: 3.1.1
+      shebang-command: 2.0.0
+      which: 2.0.2
+
+  /css-shorthand-properties@1.1.1:
+    resolution: {integrity: sha512-Md+Juc7M3uOdbAFwOYlTrccIZ7oCFuzrhKYQjdeUEW/sE1hv17Jp/Bws+ReOPpGVBTYCBoYo+G17V5Qo8QQ75A==}
+    dev: true
+
+  /css-tree@2.3.1:
+    resolution: {integrity: sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==}
+    engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0}
+    dependencies:
+      mdn-data: 2.0.30
+      source-map-js: 1.0.2
+    dev: false
+
+  /css-value@0.0.1:
+    resolution: {integrity: sha512-FUV3xaJ63buRLgHrLQVlVgQnQdR4yqdLGaDu7g8CQcWjInDfM9plBTPI9FRfpahju1UBSaMckeb2/46ApS/V1Q==}
+    dev: true
+
+  /cssesc@3.0.0:
+    resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: false
+
+  /data-uri-to-buffer@4.0.1:
+    resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==}
+    engines: {node: '>= 12'}
+    dev: true
+
+  /data-uri-to-buffer@6.0.1:
+    resolution: {integrity: sha512-MZd3VlchQkp8rdend6vrx7MmVDJzSNTBvghvKjirLkD+WTChA3KUf0jkE68Q4UyctNqI11zZO9/x2Yx+ub5Cvg==}
+    engines: {node: '>= 14'}
+    dev: true
+
+  /debug@4.3.4:
+    resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==}
+    engines: {node: '>=6.0'}
+    peerDependencies:
+      supports-color: '*'
+    peerDependenciesMeta:
+      supports-color:
+        optional: true
+    dependencies:
+      ms: 2.1.2
+
+  /decamelize@6.0.0:
+    resolution: {integrity: sha512-Fv96DCsdOgB6mdGl67MT5JaTNKRzrzill5OH5s8bjYJXVlcXyPYGyPsUkWyGV5p1TXI5esYIYMMeDJL0hEIwaA==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dev: true
+
+  /decompress-response@6.0.0:
+    resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==}
+    engines: {node: '>=10'}
+    dependencies:
+      mimic-response: 3.1.0
+    dev: true
+
+  /deep-eql@4.1.3:
+    resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==}
+    engines: {node: '>=6'}
+    dependencies:
+      type-detect: 4.0.8
+    dev: true
+
+  /deep-is@0.1.4:
+    resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==}
+    dev: false
+
+  /deepmerge-ts@5.1.0:
+    resolution: {integrity: sha512-eS8dRJOckyo9maw9Tu5O5RUi/4inFLrnoLkBe3cPfDMx3WZioXtmOew4TXQaxq7Rhl4xjDtR7c6x8nNTxOvbFw==}
+    engines: {node: '>=16.0.0'}
+    dev: true
+
+  /defer-to-connect@2.0.1:
+    resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /degenerator@5.0.1:
+    resolution: {integrity: sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==}
+    engines: {node: '>= 14'}
+    dependencies:
+      ast-types: 0.13.4
+      escodegen: 2.1.0
+      esprima: 4.0.1
+    dev: true
+
+  /dequal@2.0.3:
+    resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==}
+    engines: {node: '>=6'}
+
+  /devtools-protocol@0.0.1147663:
+    resolution: {integrity: sha512-hyWmRrexdhbZ1tcJUGpO95ivbRhWXz++F4Ko+n21AY5PNln2ovoJw+8ZMNDTtip+CNFQfrtLVh/w4009dXO/eQ==}
+    dev: true
+
+  /devtools-protocol@0.0.1213968:
+    resolution: {integrity: sha512-o4n/beY+3CcZwFctYapjGelKptR4AuQT5gXS1Kvgbig+ArwkxK7f8wDVuD1wsoswiJWCwV6OK+Qb7vhNzNmABQ==}
+    dev: true
+
+  /diff@4.0.2:
+    resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==}
+    engines: {node: '>=0.3.1'}
+
+  /diff@5.1.0:
+    resolution: {integrity: sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==}
+    engines: {node: '>=0.3.1'}
+    dev: true
+
+  /dir-glob@3.0.1:
+    resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==}
+    engines: {node: '>=8'}
+    dependencies:
+      path-type: 4.0.0
+
+  /doctrine@3.0.0:
+    resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      esutils: 2.0.3
+    dev: false
+
+  /duplexer2@0.1.4:
+    resolution: {integrity: sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==}
+    dependencies:
+      readable-stream: 2.3.8
+    dev: true
+
+  /eastasianwidth@0.2.0:
+    resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
+    dev: true
+
+  /edge-paths@3.0.5:
+    resolution: {integrity: sha512-sB7vSrDnFa4ezWQk9nZ/n0FdpdUuC6R1EOrlU3DL+bovcNFK28rqu2emmAUjujYEJTWIgQGqgVVWUZXMnc8iWg==}
+    engines: {node: '>=14.0.0'}
+    dependencies:
+      '@types/which': 2.0.2
+      which: 2.0.2
+    dev: true
+
+  /edgedriver@5.3.8:
+    resolution: {integrity: sha512-FWLPDuwJDeGGgtmlqTXb4lQi/HV9yylLo1F9O1g9TLqSemA5T6xH28seUIfyleVirLFtDQyKNUxKsMhMT4IfnA==}
+    hasBin: true
+    requiresBuild: true
+    dependencies:
+      '@wdio/logger': 8.16.17
+      decamelize: 6.0.0
+      edge-paths: 3.0.5
+      node-fetch: 3.3.2
+      unzipper: 0.10.14
+      which: 4.0.0
+    dev: true
+
+  /emoji-regex@8.0.0:
+    resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
+    dev: true
+
+  /emoji-regex@9.2.2:
+    resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
+    dev: true
+
+  /end-of-stream@1.4.4:
+    resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==}
+    dependencies:
+      once: 1.4.0
+    dev: true
+
+  /esbuild-android-64@0.14.54:
+    resolution: {integrity: sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-android-arm64@0.14.54:
+    resolution: {integrity: sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [android]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-darwin-64@0.14.54:
+    resolution: {integrity: sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-darwin-arm64@0.14.54:
+    resolution: {integrity: sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [darwin]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-freebsd-64@0.14.54:
+    resolution: {integrity: sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-freebsd-arm64@0.14.54:
+    resolution: {integrity: sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [freebsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-32@0.14.54:
+    resolution: {integrity: sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-64@0.14.54:
+    resolution: {integrity: sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-arm64@0.14.54:
+    resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-arm@0.14.54:
+    resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==}
+    engines: {node: '>=12'}
+    cpu: [arm]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-mips64le@0.14.54:
+    resolution: {integrity: sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==}
+    engines: {node: '>=12'}
+    cpu: [mips64el]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-ppc64le@0.14.54:
+    resolution: {integrity: sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==}
+    engines: {node: '>=12'}
+    cpu: [ppc64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-riscv64@0.14.54:
+    resolution: {integrity: sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==}
+    engines: {node: '>=12'}
+    cpu: [riscv64]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-linux-s390x@0.14.54:
+    resolution: {integrity: sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==}
+    engines: {node: '>=12'}
+    cpu: [s390x]
+    os: [linux]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-netbsd-64@0.14.54:
+    resolution: {integrity: sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [netbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-openbsd-64@0.14.54:
+    resolution: {integrity: sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [openbsd]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-sunos-64@0.14.54:
+    resolution: {integrity: sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [sunos]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-windows-32@0.14.54:
+    resolution: {integrity: sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==}
+    engines: {node: '>=12'}
+    cpu: [ia32]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-windows-64@0.14.54:
+    resolution: {integrity: sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==}
+    engines: {node: '>=12'}
+    cpu: [x64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild-windows-arm64@0.14.54:
+    resolution: {integrity: sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==}
+    engines: {node: '>=12'}
+    cpu: [arm64]
+    os: [win32]
+    requiresBuild: true
+    dev: true
+    optional: true
+
+  /esbuild@0.14.54:
+    resolution: {integrity: sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    optionalDependencies:
+      '@esbuild/linux-loong64': 0.14.54
+      esbuild-android-64: 0.14.54
+      esbuild-android-arm64: 0.14.54
+      esbuild-darwin-64: 0.14.54
+      esbuild-darwin-arm64: 0.14.54
+      esbuild-freebsd-64: 0.14.54
+      esbuild-freebsd-arm64: 0.14.54
+      esbuild-linux-32: 0.14.54
+      esbuild-linux-64: 0.14.54
+      esbuild-linux-arm: 0.14.54
+      esbuild-linux-arm64: 0.14.54
+      esbuild-linux-mips64le: 0.14.54
+      esbuild-linux-ppc64le: 0.14.54
+      esbuild-linux-riscv64: 0.14.54
+      esbuild-linux-s390x: 0.14.54
+      esbuild-netbsd-64: 0.14.54
+      esbuild-openbsd-64: 0.14.54
+      esbuild-sunos-64: 0.14.54
+      esbuild-windows-32: 0.14.54
+      esbuild-windows-64: 0.14.54
+      esbuild-windows-arm64: 0.14.54
+    dev: true
+
+  /esbuild@0.16.17:
+    resolution: {integrity: sha512-G8LEkV0XzDMNwXKgM0Jwu3nY3lSTwSGY6XbxM9cr9+s0T/qSV1q1JVPBGzm3dcjhCic9+emZDmMffkwgPeOeLg==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    optionalDependencies:
+      '@esbuild/android-arm': 0.16.17
+      '@esbuild/android-arm64': 0.16.17
+      '@esbuild/android-x64': 0.16.17
+      '@esbuild/darwin-arm64': 0.16.17
+      '@esbuild/darwin-x64': 0.16.17
+      '@esbuild/freebsd-arm64': 0.16.17
+      '@esbuild/freebsd-x64': 0.16.17
+      '@esbuild/linux-arm': 0.16.17
+      '@esbuild/linux-arm64': 0.16.17
+      '@esbuild/linux-ia32': 0.16.17
+      '@esbuild/linux-loong64': 0.16.17
+      '@esbuild/linux-mips64el': 0.16.17
+      '@esbuild/linux-ppc64': 0.16.17
+      '@esbuild/linux-riscv64': 0.16.17
+      '@esbuild/linux-s390x': 0.16.17
+      '@esbuild/linux-x64': 0.16.17
+      '@esbuild/netbsd-x64': 0.16.17
+      '@esbuild/openbsd-x64': 0.16.17
+      '@esbuild/sunos-x64': 0.16.17
+      '@esbuild/win32-arm64': 0.16.17
+      '@esbuild/win32-ia32': 0.16.17
+      '@esbuild/win32-x64': 0.16.17
+
+  /esbuild@0.17.19:
+    resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==}
+    engines: {node: '>=12'}
+    hasBin: true
+    requiresBuild: true
+    optionalDependencies:
+      '@esbuild/android-arm': 0.17.19
+      '@esbuild/android-arm64': 0.17.19
+      '@esbuild/android-x64': 0.17.19
+      '@esbuild/darwin-arm64': 0.17.19
+      '@esbuild/darwin-x64': 0.17.19
+      '@esbuild/freebsd-arm64': 0.17.19
+      '@esbuild/freebsd-x64': 0.17.19
+      '@esbuild/linux-arm': 0.17.19
+      '@esbuild/linux-arm64': 0.17.19
+      '@esbuild/linux-ia32': 0.17.19
+      '@esbuild/linux-loong64': 0.17.19
+      '@esbuild/linux-mips64el': 0.17.19
+      '@esbuild/linux-ppc64': 0.17.19
+      '@esbuild/linux-riscv64': 0.17.19
+      '@esbuild/linux-s390x': 0.17.19
+      '@esbuild/linux-x64': 0.17.19
+      '@esbuild/netbsd-x64': 0.17.19
+      '@esbuild/openbsd-x64': 0.17.19
+      '@esbuild/sunos-x64': 0.17.19
+      '@esbuild/win32-arm64': 0.17.19
+      '@esbuild/win32-ia32': 0.17.19
+      '@esbuild/win32-x64': 0.17.19
+    dev: true
+
+  /escalade@3.1.1:
+    resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /escape-string-regexp@4.0.0:
+    resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==}
+    engines: {node: '>=10'}
+    dev: false
+
+  /escodegen@2.1.0:
+    resolution: {integrity: sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==}
+    engines: {node: '>=6.0'}
+    hasBin: true
+    dependencies:
+      esprima: 4.0.1
+      estraverse: 5.3.0
+      esutils: 2.0.3
+    optionalDependencies:
+      source-map: 0.6.1
+    dev: true
+
+  /eslint-compat-utils@0.1.2(eslint@8.54.0):
+    resolution: {integrity: sha512-Jia4JDldWnFNIru1Ehx1H5s9/yxiRHY/TimCuUc0jNexew3cF1gI6CYZil1ociakfWO3rRqFjl1mskBblB3RYg==}
+    engines: {node: '>=12'}
+    peerDependencies:
+      eslint: '>=6.0.0'
+    dependencies:
+      eslint: 8.54.0
+    dev: false
+
+  /eslint-config-prettier@9.0.0(eslint@8.54.0):
+    resolution: {integrity: sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw==}
+    hasBin: true
+    peerDependencies:
+      eslint: '>=7.0.0'
+    dependencies:
+      eslint: 8.54.0
+    dev: false
+
+  /eslint-plugin-prettier@4.2.1(eslint-config-prettier@9.0.0)(eslint@8.54.0)(prettier@3.1.0):
+    resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==}
+    engines: {node: '>=12.0.0'}
+    peerDependencies:
+      eslint: '>=7.28.0'
+      eslint-config-prettier: '*'
+      prettier: '>=2.0.0'
+    peerDependenciesMeta:
+      eslint-config-prettier:
+        optional: true
+    dependencies:
+      eslint: 8.54.0
+      eslint-config-prettier: 9.0.0(eslint@8.54.0)
+      prettier: 3.1.0
+      prettier-linter-helpers: 1.0.0
+    dev: false
+
+  /eslint-plugin-svelte@2.35.1(eslint@8.54.0)(svelte@4.2.7)(ts-node@10.9.1):
+    resolution: {integrity: sha512-IF8TpLnROSGy98Z3NrsKXWDSCbNY2ReHDcrYTuXZMbfX7VmESISR78TWgO9zdg4Dht1X8coub5jKwHzP0ExRug==}
+    engines: {node: ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      eslint: ^7.0.0 || ^8.0.0-0
+      svelte: ^3.37.0 || ^4.0.0
+    peerDependenciesMeta:
+      svelte:
+        optional: true
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0)
+      '@jridgewell/sourcemap-codec': 1.4.15
+      debug: 4.3.4
+      eslint: 8.54.0
+      eslint-compat-utils: 0.1.2(eslint@8.54.0)
+      esutils: 2.0.3
+      known-css-properties: 0.29.0
+      postcss: 8.4.31
+      postcss-load-config: 3.1.4(postcss@8.4.31)(ts-node@10.9.1)
+      postcss-safe-parser: 6.0.0(postcss@8.4.31)
+      postcss-selector-parser: 6.0.13
+      semver: 7.5.4
+      svelte: 4.2.7
+      svelte-eslint-parser: 0.33.1(svelte@4.2.7)
+    transitivePeerDependencies:
+      - supports-color
+      - ts-node
+    dev: false
+
+  /eslint-scope@5.1.1:
+    resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==}
+    engines: {node: '>=8.0.0'}
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 4.3.0
+    dev: false
+
+  /eslint-scope@7.2.2:
+    resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      esrecurse: 4.3.0
+      estraverse: 5.3.0
+    dev: false
+
+  /eslint-visitor-keys@3.4.3:
+    resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dev: false
+
+  /eslint@8.54.0:
+    resolution: {integrity: sha512-NY0DfAkM8BIZDVl6PgSa1ttZbx3xHgJzSNJKYcQglem6CppHyMhRIQkBVSSMaSRnLhig3jsDbEzOjwCVt4AmmA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    hasBin: true
+    dependencies:
+      '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0)
+      '@eslint-community/regexpp': 4.10.0
+      '@eslint/eslintrc': 2.1.3
+      '@eslint/js': 8.54.0
+      '@humanwhocodes/config-array': 0.11.13
+      '@humanwhocodes/module-importer': 1.0.1
+      '@nodelib/fs.walk': 1.2.8
+      '@ungap/structured-clone': 1.2.0
+      ajv: 6.12.6
+      chalk: 4.1.2
+      cross-spawn: 7.0.3
+      debug: 4.3.4
+      doctrine: 3.0.0
+      escape-string-regexp: 4.0.0
+      eslint-scope: 7.2.2
+      eslint-visitor-keys: 3.4.3
+      espree: 9.6.1
+      esquery: 1.5.0
+      esutils: 2.0.3
+      fast-deep-equal: 3.1.3
+      file-entry-cache: 6.0.1
+      find-up: 5.0.0
+      glob-parent: 6.0.2
+      globals: 13.23.0
+      graphemer: 1.4.0
+      ignore: 5.3.0
+      imurmurhash: 0.1.4
+      is-glob: 4.0.3
+      is-path-inside: 3.0.3
+      js-yaml: 4.1.0
+      json-stable-stringify-without-jsonify: 1.0.1
+      levn: 0.4.1
+      lodash.merge: 4.6.2
+      minimatch: 3.1.2
+      natural-compare: 1.4.0
+      optionator: 0.9.3
+      strip-ansi: 6.0.1
+      text-table: 0.2.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: false
+
+  /espree@9.6.1:
+    resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    dependencies:
+      acorn: 8.11.2
+      acorn-jsx: 5.3.2(acorn@8.11.2)
+      eslint-visitor-keys: 3.4.3
+    dev: false
+
+  /esprima@4.0.1:
+    resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==}
+    engines: {node: '>=4'}
+    hasBin: true
+    dev: true
+
+  /esquery@1.5.0:
+    resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==}
+    engines: {node: '>=0.10'}
+    dependencies:
+      estraverse: 5.3.0
+    dev: false
+
+  /esrecurse@4.3.0:
+    resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==}
+    engines: {node: '>=4.0'}
+    dependencies:
+      estraverse: 5.3.0
+    dev: false
+
+  /estraverse@4.3.0:
+    resolution: {integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==}
+    engines: {node: '>=4.0'}
+    dev: false
+
+  /estraverse@5.3.0:
+    resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==}
+    engines: {node: '>=4.0'}
+
+  /estree-walker@0.6.1:
+    resolution: {integrity: sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w==}
+    dev: true
+
+  /estree-walker@1.0.1:
+    resolution: {integrity: sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==}
+    dev: true
+
+  /estree-walker@2.0.2:
+    resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==}
+    dev: true
+
+  /estree-walker@3.0.3:
+    resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==}
+    dependencies:
+      '@types/estree': 1.0.5
+    dev: false
+
+  /esutils@2.0.3:
+    resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
+    engines: {node: '>=0.10.0'}
+
+  /execa@5.1.1:
+    resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==}
+    engines: {node: '>=10'}
+    dependencies:
+      cross-spawn: 7.0.3
+      get-stream: 6.0.1
+      human-signals: 2.1.0
+      is-stream: 2.0.1
+      merge-stream: 2.0.0
+      npm-run-path: 4.0.1
+      onetime: 5.1.2
+      signal-exit: 3.0.7
+      strip-final-newline: 2.0.0
+    dev: true
+
+  /extract-zip@2.0.1:
+    resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==}
+    engines: {node: '>= 10.17.0'}
+    hasBin: true
+    dependencies:
+      debug: 4.3.4
+      get-stream: 5.2.0
+      yauzl: 2.10.0
+    optionalDependencies:
+      '@types/yauzl': 2.10.3
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /fast-deep-equal@2.0.1:
+    resolution: {integrity: sha512-bCK/2Z4zLidyB4ReuIsvALH6w31YfAQDmXMqMx6FyfHqvBxtjC0eRumeSu4Bs3XtXwpyIywtSTrVT99BxY1f9w==}
+    dev: true
+
+  /fast-deep-equal@3.1.3:
+    resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==}
+    dev: false
+
+  /fast-diff@1.3.0:
+    resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==}
+    dev: false
+
+  /fast-fifo@1.3.2:
+    resolution: {integrity: sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==}
+    dev: true
+
+  /fast-glob@3.3.2:
+    resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==}
+    engines: {node: '>=8.6.0'}
+    dependencies:
+      '@nodelib/fs.stat': 2.0.5
+      '@nodelib/fs.walk': 1.2.8
+      glob-parent: 5.1.2
+      merge2: 1.4.1
+      micromatch: 4.0.5
+
+  /fast-json-stable-stringify@2.1.0:
+    resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==}
+    dev: false
+
+  /fast-levenshtein@2.0.6:
+    resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==}
+    dev: false
+
+  /fastq@1.15.0:
+    resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==}
+    dependencies:
+      reusify: 1.0.4
+
+  /fd-slicer@1.1.0:
+    resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==}
+    dependencies:
+      pend: 1.2.0
+    dev: true
+
+  /fetch-blob@3.2.0:
+    resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==}
+    engines: {node: ^12.20 || >= 14.13}
+    dependencies:
+      node-domexception: 1.0.0
+      web-streams-polyfill: 3.2.1
+    dev: true
+
+  /file-entry-cache@6.0.1:
+    resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
+    engines: {node: ^10.12.0 || >=12.0.0}
+    dependencies:
+      flat-cache: 3.2.0
+    dev: false
+
+  /fill-range@7.0.1:
+    resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==}
+    engines: {node: '>=8'}
+    dependencies:
+      to-regex-range: 5.0.1
+
+  /find-up@5.0.0:
+    resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==}
+    engines: {node: '>=10'}
+    dependencies:
+      locate-path: 6.0.0
+      path-exists: 4.0.0
+    dev: false
+
+  /flat-cache@3.2.0:
+    resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==}
+    engines: {node: ^10.12.0 || >=12.0.0}
+    dependencies:
+      flatted: 3.2.9
+      keyv: 4.5.4
+      rimraf: 3.0.2
+    dev: false
+
+  /flatted@3.2.9:
+    resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==}
+    dev: false
+
+  /foreground-child@3.1.1:
+    resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==}
+    engines: {node: '>=14'}
+    dependencies:
+      cross-spawn: 7.0.3
+      signal-exit: 4.1.0
+    dev: true
+
+  /form-data-encoder@2.1.4:
+    resolution: {integrity: sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==}
+    engines: {node: '>= 14.17'}
+    dev: true
+
+  /formdata-polyfill@4.0.10:
+    resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==}
+    engines: {node: '>=12.20.0'}
+    dependencies:
+      fetch-blob: 3.2.0
+    dev: true
+
+  /fs-extra@8.1.0:
+    resolution: {integrity: sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==}
+    engines: {node: '>=6 <7 || >=8'}
+    dependencies:
+      graceful-fs: 4.2.11
+      jsonfile: 4.0.0
+      universalify: 0.1.2
+    dev: true
+
+  /fs.realpath@1.0.0:
+    resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
+
+  /fsevents@2.3.3:
+    resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
+    engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
+    os: [darwin]
+    requiresBuild: true
+    optional: true
+
+  /fstream@1.0.12:
+    resolution: {integrity: sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==}
+    engines: {node: '>=0.6'}
+    dependencies:
+      graceful-fs: 4.2.11
+      inherits: 2.0.4
+      mkdirp: 0.5.6
+      rimraf: 2.7.1
+    dev: true
+
+  /function-bind@1.1.2:
+    resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==}
+
+  /geckodriver@4.2.1:
+    resolution: {integrity: sha512-4m/CRk0OI8MaANRuFIahvOxYTSjlNAO2p9JmE14zxueknq6cdtB5M9UGRQ8R9aMV0bLGNVHHDnDXmoXdOwJfWg==}
+    engines: {node: ^16.13 || >=18 || >=20}
+    hasBin: true
+    requiresBuild: true
+    dependencies:
+      '@wdio/logger': 8.16.17
+      decamelize: 6.0.0
+      http-proxy-agent: 7.0.0
+      https-proxy-agent: 7.0.2
+      node-fetch: 3.3.2
+      tar-fs: 3.0.4
+      unzipper: 0.10.14
+      which: 4.0.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /get-caller-file@2.0.5:
+    resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==}
+    engines: {node: 6.* || 8.* || >= 10.*}
+    dev: true
+
+  /get-func-name@2.0.2:
+    resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==}
+    dev: true
+
+  /get-port@7.0.0:
+    resolution: {integrity: sha512-mDHFgApoQd+azgMdwylJrv2DX47ywGq1i5VFJE7fZ0dttNq3iQMfsU4IvEgBHojA3KqEudyu7Vq+oN8kNaNkWw==}
+    engines: {node: '>=16'}
+    dev: true
+
+  /get-stream@5.2.0:
+    resolution: {integrity: sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==}
+    engines: {node: '>=8'}
+    dependencies:
+      pump: 3.0.0
+    dev: true
+
+  /get-stream@6.0.1:
+    resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /get-uri@6.0.2:
+    resolution: {integrity: sha512-5KLucCJobh8vBY1K07EFV4+cPZH3mrV9YeAruUseCQKHB58SGjjT2l9/eA9LD082IiuMjSlFJEcdJ27TXvbZNw==}
+    engines: {node: '>= 14'}
+    dependencies:
+      basic-ftp: 5.0.3
+      data-uri-to-buffer: 6.0.1
+      debug: 4.3.4
+      fs-extra: 8.1.0
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /glob-parent@5.1.2:
+    resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==}
+    engines: {node: '>= 6'}
+    dependencies:
+      is-glob: 4.0.3
+
+  /glob-parent@6.0.2:
+    resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
+    engines: {node: '>=10.13.0'}
+    dependencies:
+      is-glob: 4.0.3
+    dev: false
+
+  /glob@10.3.10:
+    resolution: {integrity: sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    hasBin: true
+    dependencies:
+      foreground-child: 3.1.1
+      jackspeak: 2.3.6
+      minimatch: 9.0.3
+      minipass: 7.0.4
+      path-scurry: 1.10.1
+    dev: true
+
+  /glob@7.1.6:
+    resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+    dev: true
+
+  /glob@7.2.3:
+    resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 3.1.2
+      once: 1.4.0
+      path-is-absolute: 1.0.1
+
+  /glob@8.1.0:
+    resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      fs.realpath: 1.0.0
+      inflight: 1.0.6
+      inherits: 2.0.4
+      minimatch: 5.1.6
+      once: 1.4.0
+    dev: true
+
+  /globals@13.23.0:
+    resolution: {integrity: sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==}
+    engines: {node: '>=8'}
+    dependencies:
+      type-fest: 0.20.2
+    dev: false
+
+  /globby@11.1.0:
+    resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==}
+    engines: {node: '>=10'}
+    dependencies:
+      array-union: 2.1.0
+      dir-glob: 3.0.1
+      fast-glob: 3.3.2
+      ignore: 5.3.0
+      merge2: 1.4.1
+      slash: 3.0.0
+
+  /got@12.6.1:
+    resolution: {integrity: sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==}
+    engines: {node: '>=14.16'}
+    dependencies:
+      '@sindresorhus/is': 5.6.0
+      '@szmarczak/http-timer': 5.0.1
+      cacheable-lookup: 7.0.0
+      cacheable-request: 10.2.14
+      decompress-response: 6.0.0
+      form-data-encoder: 2.1.4
+      get-stream: 6.0.1
+      http2-wrapper: 2.2.1
+      lowercase-keys: 3.0.0
+      p-cancelable: 3.0.0
+      responselike: 3.0.0
+    dev: true
+
+  /got@13.0.0:
+    resolution: {integrity: sha512-XfBk1CxOOScDcMr9O1yKkNaQyy865NbYs+F7dr4H0LZMVgCj2Le59k6PqbNHoL5ToeaEQUYh6c6yMfVcc6SJxA==}
+    engines: {node: '>=16'}
+    dependencies:
+      '@sindresorhus/is': 5.6.0
+      '@szmarczak/http-timer': 5.0.1
+      cacheable-lookup: 7.0.0
+      cacheable-request: 10.2.14
+      decompress-response: 6.0.0
+      form-data-encoder: 2.1.4
+      get-stream: 6.0.1
+      http2-wrapper: 2.2.1
+      lowercase-keys: 3.0.0
+      p-cancelable: 3.0.0
+      responselike: 3.0.0
+    dev: true
+
+  /graceful-fs@4.2.11:
+    resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==}
+    dev: true
+
+  /grapheme-splitter@1.0.4:
+    resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==}
+    dev: true
+
+  /graphemer@1.4.0:
+    resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==}
+    dev: false
+
+  /has-flag@4.0.0:
+    resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==}
+    engines: {node: '>=8'}
+
+  /hasown@2.0.0:
+    resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==}
+    engines: {node: '>= 0.4'}
+    dependencies:
+      function-bind: 1.1.2
+
+  /http-cache-semantics@4.1.1:
+    resolution: {integrity: sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==}
+    dev: true
+
+  /http-proxy-agent@7.0.0:
+    resolution: {integrity: sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==}
+    engines: {node: '>= 14'}
+    dependencies:
+      agent-base: 7.1.0
+      debug: 4.3.4
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /http2-wrapper@2.2.1:
+    resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==}
+    engines: {node: '>=10.19.0'}
+    dependencies:
+      quick-lru: 5.1.1
+      resolve-alpn: 1.2.1
+    dev: true
+
+  /https-proxy-agent@7.0.2:
+    resolution: {integrity: sha512-NmLNjm6ucYwtcUmL7JQC1ZQ57LmHP4lT15FQ8D61nak1rO6DH+fz5qNK2Ap5UN4ZapYICE3/0KodcLYSPsPbaA==}
+    engines: {node: '>= 14'}
+    dependencies:
+      agent-base: 7.1.0
+      debug: 4.3.4
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /human-signals@2.1.0:
+    resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==}
+    engines: {node: '>=10.17.0'}
+    dev: true
+
+  /ieee754@1.2.1:
+    resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==}
+    dev: true
+
+  /ignore@5.3.0:
+    resolution: {integrity: sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==}
+    engines: {node: '>= 4'}
+
+  /import-fresh@3.3.0:
+    resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==}
+    engines: {node: '>=6'}
+    dependencies:
+      parent-module: 1.0.1
+      resolve-from: 4.0.0
+    dev: false
+
+  /import-meta-resolve@3.1.1:
+    resolution: {integrity: sha512-qeywsE/KC3w9Fd2ORrRDUw6nS/nLwZpXgfrOc2IILvZYnCaEMd+D56Vfg9k4G29gIeVi3XKql1RQatME8iYsiw==}
+    dev: true
+
+  /imurmurhash@0.1.4:
+    resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
+    engines: {node: '>=0.8.19'}
+    dev: false
+
+  /inflight@1.0.6:
+    resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==}
+    dependencies:
+      once: 1.4.0
+      wrappy: 1.0.2
+
+  /inherits@2.0.4:
+    resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
+
+  /ip@1.1.8:
+    resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==}
+    dev: true
+
+  /ip@2.0.0:
+    resolution: {integrity: sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==}
+    dev: true
+
+  /is-binary-path@2.1.0:
+    resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==}
+    engines: {node: '>=8'}
+    dependencies:
+      binary-extensions: 2.2.0
+    dev: true
+
+  /is-core-module@2.13.1:
+    resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==}
+    dependencies:
+      hasown: 2.0.0
+
+  /is-extglob@2.1.1:
+    resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==}
+    engines: {node: '>=0.10.0'}
+
+  /is-fullwidth-code-point@3.0.0:
+    resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /is-fullwidth-code-point@4.0.0:
+    resolution: {integrity: sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /is-glob@4.0.3:
+    resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==}
+    engines: {node: '>=0.10.0'}
+    dependencies:
+      is-extglob: 2.1.1
+
+  /is-number@7.0.0:
+    resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==}
+    engines: {node: '>=0.12.0'}
+
+  /is-path-inside@3.0.3:
+    resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==}
+    engines: {node: '>=8'}
+    dev: false
+
+  /is-plain-obj@4.1.0:
+    resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /is-reference@3.0.2:
+    resolution: {integrity: sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==}
+    dependencies:
+      '@types/estree': 1.0.5
+    dev: false
+
+  /is-stream@2.0.1:
+    resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /isarray@1.0.0:
+    resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==}
+    dev: true
+
+  /isexe@2.0.0:
+    resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==}
+
+  /isexe@3.1.1:
+    resolution: {integrity: sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==}
+    engines: {node: '>=16'}
+    dev: true
+
+  /jackspeak@2.3.6:
+    resolution: {integrity: sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==}
+    engines: {node: '>=14'}
+    dependencies:
+      '@isaacs/cliui': 8.0.2
+    optionalDependencies:
+      '@pkgjs/parseargs': 0.11.0
+    dev: true
+
+  /joycon@3.1.1:
+    resolution: {integrity: sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /js-yaml@4.1.0:
+    resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==}
+    hasBin: true
+    dependencies:
+      argparse: 2.0.1
+    dev: false
+
+  /json-buffer@3.0.1:
+    resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
+
+  /json-schema-traverse@0.4.1:
+    resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
+    dev: false
+
+  /json-stable-stringify-without-jsonify@1.0.1:
+    resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
+    dev: false
+
+  /jsonc-parser@3.2.0:
+    resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==}
+    dev: true
+
+  /jsonfile@4.0.0:
+    resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==}
+    optionalDependencies:
+      graceful-fs: 4.2.11
+    dev: true
+
+  /keyv@4.5.4:
+    resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==}
+    dependencies:
+      json-buffer: 3.0.1
+
+  /known-css-properties@0.29.0:
+    resolution: {integrity: sha512-Ne7wqW7/9Cz54PDt4I3tcV+hAyat8ypyOGzYRJQfdxnnjeWsTxt1cy8pjvvKeI5kfXuyvULyeeAvwvvtAX3ayQ==}
+    dev: false
+
+  /ky@0.33.3:
+    resolution: {integrity: sha512-CasD9OCEQSFIam2U8efFK81Yeg8vNMTBUqtMOHlrcWQHqUX3HeCl9Dr31u4toV7emlH8Mymk5+9p0lL6mKb/Xw==}
+    engines: {node: '>=14.16'}
+    dev: true
+
+  /lazystream@1.0.1:
+    resolution: {integrity: sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==}
+    engines: {node: '>= 0.6.3'}
+    dependencies:
+      readable-stream: 2.3.8
+    dev: true
+
+  /levn@0.4.1:
+    resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+    dev: false
+
+  /lilconfig@2.1.0:
+    resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==}
+    engines: {node: '>=10'}
+
+  /lines-and-columns@1.2.4:
+    resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==}
+    dev: true
+
+  /listenercount@1.0.1:
+    resolution: {integrity: sha512-3mk/Zag0+IJxeDrxSgaDPy4zZ3w05PRZeJNnlWhzFz5OkX49J4krc+A8X2d2M69vGMBEX0uyl8M+W+8gH+kBqQ==}
+    dev: true
+
+  /load-tsconfig@0.2.5:
+    resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dev: true
+
+  /local-pkg@0.4.3:
+    resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /locate-app@2.1.0:
+    resolution: {integrity: sha512-rcVo/iLUxrd9d0lrmregK/Z5Y5NCpSwf9KlMbPpOHmKmdxdQY1Fj8NDQ5QymJTryCsBLqwmniFv2f3JKbk9Bvg==}
+    dependencies:
+      n12: 0.4.0
+      type-fest: 2.13.0
+      userhome: 1.0.0
+    dev: true
+
+  /locate-character@3.0.0:
+    resolution: {integrity: sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==}
+    dev: false
+
+  /locate-path@6.0.0:
+    resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==}
+    engines: {node: '>=10'}
+    dependencies:
+      p-locate: 5.0.0
+    dev: false
+
+  /lodash.clonedeep@4.5.0:
+    resolution: {integrity: sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==}
+    dev: true
+
+  /lodash.merge@4.6.2:
+    resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
+    dev: false
+
+  /lodash.sortby@4.7.0:
+    resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==}
+    dev: true
+
+  /lodash.zip@4.2.0:
+    resolution: {integrity: sha512-C7IOaBBK/0gMORRBd8OETNx3kmOkgIWIPvyDpZSCTwUrpYmgZwJkjZeOD8ww4xbOUOs4/attY+pciKvadNfFbg==}
+    dev: true
+
+  /lodash@4.17.21:
+    resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}
+    dev: true
+
+  /loglevel-plugin-prefix@0.8.4:
+    resolution: {integrity: sha512-WpG9CcFAOjz/FtNht+QJeGpvVl/cdR6P0z6OcXSkr8wFJOsV2GRj2j10JLfjuA4aYkcKCNIEqRGCyTife9R8/g==}
+    dev: true
+
+  /loglevel@1.8.1:
+    resolution: {integrity: sha512-tCRIJM51SHjAayKwC+QAg8hT8vg6z7GSgLJKGvzuPb1Wc+hLzqtuVLxp6/HzSPOozuK+8ErAhy7U/sVzw8Dgfg==}
+    engines: {node: '>= 0.6.0'}
+    dev: true
+
+  /loupe@2.3.7:
+    resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==}
+    dependencies:
+      get-func-name: 2.0.2
+    dev: true
+
+  /lowercase-keys@3.0.0:
+    resolution: {integrity: sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dev: true
+
+  /lru-cache@10.0.3:
+    resolution: {integrity: sha512-B7gr+F6MkqB3uzINHXNctGieGsRTMwIBgxkp0yq/5BwcuDzD4A8wQpHQW6vDAm1uKSLQghmRdD9sKqf2vJ1cEg==}
+    engines: {node: 14 || >=16.14}
+    dev: true
+
+  /lru-cache@6.0.0:
+    resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==}
+    engines: {node: '>=10'}
+    dependencies:
+      yallist: 4.0.0
+
+  /lru-cache@7.18.3:
+    resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /magic-string@0.25.9:
+    resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==}
+    dependencies:
+      sourcemap-codec: 1.4.8
+    dev: true
+
+  /magic-string@0.30.5:
+    resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==}
+    engines: {node: '>=12'}
+    dependencies:
+      '@jridgewell/sourcemap-codec': 1.4.15
+    dev: false
+
+  /make-error@1.3.6:
+    resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==}
+
+  /mdn-data@2.0.30:
+    resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==}
+    dev: false
+
+  /merge-stream@2.0.0:
+    resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
+    dev: true
+
+  /merge2@1.4.1:
+    resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
+    engines: {node: '>= 8'}
+
+  /micromatch@4.0.5:
+    resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==}
+    engines: {node: '>=8.6'}
+    dependencies:
+      braces: 3.0.2
+      picomatch: 2.3.1
+
+  /mimic-fn@2.1.0:
+    resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /mimic-response@3.1.0:
+    resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /mimic-response@4.0.0:
+    resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dev: true
+
+  /minimatch@3.1.2:
+    resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==}
+    dependencies:
+      brace-expansion: 1.1.11
+
+  /minimatch@5.1.6:
+    resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==}
+    engines: {node: '>=10'}
+    dependencies:
+      brace-expansion: 2.0.1
+    dev: true
+
+  /minimatch@9.0.3:
+    resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    dependencies:
+      brace-expansion: 2.0.1
+    dev: true
+
+  /minimist@1.2.8:
+    resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==}
+    dev: true
+
+  /minipass@7.0.4:
+    resolution: {integrity: sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    dev: true
+
+  /mitt@3.0.0:
+    resolution: {integrity: sha512-7dX2/10ITVyqh4aOSVI9gdape+t9l2/8QxHrFmUXu4EEUpdlxl6RudZUPZoc+zuY2hk1j7XxVroIVIan/pD/SQ==}
+    dev: true
+
+  /mkdirp-classic@0.5.3:
+    resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==}
+    dev: true
+
+  /mkdirp@0.5.6:
+    resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==}
+    hasBin: true
+    dependencies:
+      minimist: 1.2.8
+    dev: true
+
+  /mlly@1.4.2:
+    resolution: {integrity: sha512-i/Ykufi2t1EZ6NaPLdfnZk2AX8cs0d+mTzVKuPfqPKPatxLApaBoxJQ9x1/uckXtrS/U5oisPMDkNs0yQTaBRg==}
+    dependencies:
+      acorn: 8.11.2
+      pathe: 1.1.1
+      pkg-types: 1.0.3
+      ufo: 1.3.2
+    dev: true
+
+  /modern-node-polyfills@0.1.0:
+    resolution: {integrity: sha512-/Z9mlC56KBxjLZvdNSLqSEFw9jSav43dsUxhLYLN3bZgcSX5VFdixat+QGjb/4NxaGCwW09ABJhZA5oHFj4W4A==}
+    dependencies:
+      '@jspm/core': 2.0.0-beta.24
+      '@rollup/plugin-inject': 4.0.4(rollup@2.79.1)
+      acorn: 8.11.2
+      esbuild: 0.14.54
+      local-pkg: 0.4.3
+      rollup: 2.79.1
+    dev: true
+
+  /mrmime@1.0.1:
+    resolution: {integrity: sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /ms@2.1.2:
+    resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==}
+
+  /mz@2.7.0:
+    resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
+    dependencies:
+      any-promise: 1.3.0
+      object-assign: 4.1.1
+      thenify-all: 1.6.0
+    dev: true
+
+  /n12@0.4.0:
+    resolution: {integrity: sha512-p/hj4zQ8d3pbbFLQuN1K9honUxiDDhueOWyFLw/XgBv+wZCE44bcLH4CIcsolOceJQduh4Jf7m/LfaTxyGmGtQ==}
+    dev: true
+
+  /nanoid@3.3.7:
+    resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==}
+    engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
+    hasBin: true
+
+  /natural-compare-lite@1.4.0:
+    resolution: {integrity: sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==}
+    dev: false
+
+  /natural-compare@1.4.0:
+    resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
+    dev: false
+
+  /netmask@2.0.2:
+    resolution: {integrity: sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==}
+    engines: {node: '>= 0.4.0'}
+    dev: true
+
+  /node-domexception@1.0.0:
+    resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==}
+    engines: {node: '>=10.5.0'}
+    dev: true
+
+  /node-fetch@2.7.0:
+    resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==}
+    engines: {node: 4.x || >=6.0.0}
+    peerDependencies:
+      encoding: ^0.1.0
+    peerDependenciesMeta:
+      encoding:
+        optional: true
+    dependencies:
+      whatwg-url: 5.0.0
+    dev: true
+
+  /node-fetch@3.3.2:
+    resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      data-uri-to-buffer: 4.0.1
+      fetch-blob: 3.2.0
+      formdata-polyfill: 4.0.10
+    dev: true
+
+  /normalize-path@3.0.0:
+    resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /normalize-url@8.0.0:
+    resolution: {integrity: sha512-uVFpKhj5MheNBJRTiMZ9pE/7hD1QTeEvugSJW/OmLzAp78PB5O6adfMNTvmfKhXBkvCzC+rqifWcVYpGFwTjnw==}
+    engines: {node: '>=14.16'}
+    dev: true
+
+  /npm-run-path@4.0.1:
+    resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==}
+    engines: {node: '>=8'}
+    dependencies:
+      path-key: 3.1.1
+    dev: true
+
+  /object-assign@4.1.1:
+    resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /once@1.4.0:
+    resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==}
+    dependencies:
+      wrappy: 1.0.2
+
+  /onetime@5.1.2:
+    resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==}
+    engines: {node: '>=6'}
+    dependencies:
+      mimic-fn: 2.1.0
+    dev: true
+
+  /optionator@0.9.3:
+    resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      '@aashutoshrathi/word-wrap': 1.2.6
+      deep-is: 0.1.4
+      fast-levenshtein: 2.0.6
+      levn: 0.4.1
+      prelude-ls: 1.2.1
+      type-check: 0.4.0
+    dev: false
+
+  /p-cancelable@3.0.0:
+    resolution: {integrity: sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==}
+    engines: {node: '>=12.20'}
+    dev: true
+
+  /p-limit@3.1.0:
+    resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
+    engines: {node: '>=10'}
+    dependencies:
+      yocto-queue: 0.1.0
+    dev: false
+
+  /p-limit@4.0.0:
+    resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==}
+    engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+    dependencies:
+      yocto-queue: 1.0.0
+    dev: true
+
+  /p-locate@5.0.0:
+    resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
+    engines: {node: '>=10'}
+    dependencies:
+      p-limit: 3.1.0
+    dev: false
+
+  /pac-proxy-agent@7.0.1:
+    resolution: {integrity: sha512-ASV8yU4LLKBAjqIPMbrgtaKIvxQri/yh2OpI+S6hVa9JRkUI3Y3NPFbfngDtY7oFtSMD3w31Xns89mDa3Feo5A==}
+    engines: {node: '>= 14'}
+    dependencies:
+      '@tootallnate/quickjs-emscripten': 0.23.0
+      agent-base: 7.1.0
+      debug: 4.3.4
+      get-uri: 6.0.2
+      http-proxy-agent: 7.0.0
+      https-proxy-agent: 7.0.2
+      pac-resolver: 7.0.0
+      socks-proxy-agent: 8.0.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /pac-resolver@7.0.0:
+    resolution: {integrity: sha512-Fd9lT9vJbHYRACT8OhCbZBbxr6KRSawSovFpy8nDGshaK99S/EBhVIHp9+crhxrsZOuvLpgL1n23iyPg6Rl2hg==}
+    engines: {node: '>= 14'}
+    dependencies:
+      degenerator: 5.0.1
+      ip: 1.1.8
+      netmask: 2.0.2
+    dev: true
+
+  /parent-module@1.0.1:
+    resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
+    engines: {node: '>=6'}
+    dependencies:
+      callsites: 3.1.0
+    dev: false
+
+  /path-exists@4.0.0:
+    resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
+    engines: {node: '>=8'}
+    dev: false
+
+  /path-is-absolute@1.0.1:
+    resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==}
+    engines: {node: '>=0.10.0'}
+
+  /path-key@3.1.1:
+    resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==}
+    engines: {node: '>=8'}
+
+  /path-parse@1.0.7:
+    resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
+
+  /path-scurry@1.10.1:
+    resolution: {integrity: sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==}
+    engines: {node: '>=16 || 14 >=14.17'}
+    dependencies:
+      lru-cache: 10.0.3
+      minipass: 7.0.4
+    dev: true
+
+  /path-type@4.0.0:
+    resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==}
+    engines: {node: '>=8'}
+
+  /pathe@1.1.1:
+    resolution: {integrity: sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==}
+    dev: true
+
+  /pathval@1.1.1:
+    resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==}
+    dev: true
+
+  /pend@1.2.0:
+    resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==}
+    dev: true
+
+  /periscopic@3.1.0:
+    resolution: {integrity: sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==}
+    dependencies:
+      '@types/estree': 1.0.5
+      estree-walker: 3.0.3
+      is-reference: 3.0.2
+    dev: false
+
+  /picocolors@1.0.0:
+    resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==}
+
+  /picomatch@2.3.1:
+    resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
+    engines: {node: '>=8.6'}
+
+  /pirates@4.0.6:
+    resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==}
+    engines: {node: '>= 6'}
+    dev: true
+
+  /pkg-types@1.0.3:
+    resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==}
+    dependencies:
+      jsonc-parser: 3.2.0
+      mlly: 1.4.2
+      pathe: 1.1.1
+    dev: true
+
+  /postcss-load-config@3.1.4(postcss@8.4.31)(ts-node@10.9.1):
+    resolution: {integrity: sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==}
+    engines: {node: '>= 10'}
+    peerDependencies:
+      postcss: '>=8.0.9'
+      ts-node: '>=9.0.0'
+    peerDependenciesMeta:
+      postcss:
+        optional: true
+      ts-node:
+        optional: true
+    dependencies:
+      lilconfig: 2.1.0
+      postcss: 8.4.31
+      ts-node: 10.9.1(@types/node@20.9.3)(typescript@5.3.2)
+      yaml: 1.10.2
+
+  /postcss-safe-parser@6.0.0(postcss@8.4.31):
+    resolution: {integrity: sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==}
+    engines: {node: '>=12.0'}
+    peerDependencies:
+      postcss: ^8.3.3
+    dependencies:
+      postcss: 8.4.31
+    dev: false
+
+  /postcss-scss@4.0.9(postcss@8.4.31):
+    resolution: {integrity: sha512-AjKOeiwAitL/MXxQW2DliT28EKukvvbEWx3LBmJIRN8KfBGZbRTxNYW0kSqi1COiTZ57nZ9NW06S6ux//N1c9A==}
+    engines: {node: '>=12.0'}
+    peerDependencies:
+      postcss: ^8.4.29
+    dependencies:
+      postcss: 8.4.31
+    dev: false
+
+  /postcss-selector-parser@6.0.13:
+    resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==}
+    engines: {node: '>=4'}
+    dependencies:
+      cssesc: 3.0.0
+      util-deprecate: 1.0.2
+    dev: false
+
+  /postcss@8.4.31:
+    resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==}
+    engines: {node: ^10 || ^12 || >=14}
+    dependencies:
+      nanoid: 3.3.7
+      picocolors: 1.0.0
+      source-map-js: 1.0.2
+
+  /prelude-ls@1.2.1:
+    resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
+    engines: {node: '>= 0.8.0'}
+    dev: false
+
+  /prettier-linter-helpers@1.0.0:
+    resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==}
+    engines: {node: '>=6.0.0'}
+    dependencies:
+      fast-diff: 1.3.0
+    dev: false
+
+  /prettier-plugin-svelte@3.1.1(prettier@3.1.0)(svelte@4.2.7):
+    resolution: {integrity: sha512-jLzaHfToav527/I5h2BMQfN3G5gylrJm54zFFyoXvUtfscI47877ftacUb+Eyse/3bXrhY+MtkyiuvruiHc+kg==}
+    peerDependencies:
+      prettier: ^3.0.0
+      svelte: ^3.2.0 || ^4.0.0-next.0 || ^5.0.0-next.0
+    dependencies:
+      prettier: 3.1.0
+      svelte: 4.2.7
+    dev: false
+
+  /prettier@3.1.0:
+    resolution: {integrity: sha512-TQLvXjq5IAibjh8EpBIkNKxO749UEWABoiIZehEPiY4GNpVdhaFKqSTu+QrlU6D2dPAfubRmtJTi4K4YkQ5eXw==}
+    engines: {node: '>=14'}
+    hasBin: true
+    dev: false
+
+  /pretty-format@27.5.1:
+    resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==}
+    engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0}
+    dependencies:
+      ansi-regex: 5.0.1
+      ansi-styles: 5.2.0
+      react-is: 17.0.2
+    dev: true
+
+  /process-nextick-args@2.0.1:
+    resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==}
+    dev: true
+
+  /progress@2.0.3:
+    resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==}
+    engines: {node: '>=0.4.0'}
+    dev: true
+
+  /proxy-agent@6.3.0:
+    resolution: {integrity: sha512-0LdR757eTj/JfuU7TL2YCuAZnxWXu3tkJbg4Oq3geW/qFNT/32T0sp2HnZ9O0lMR4q3vwAt0+xCA8SR0WAD0og==}
+    engines: {node: '>= 14'}
+    dependencies:
+      agent-base: 7.1.0
+      debug: 4.3.4
+      http-proxy-agent: 7.0.0
+      https-proxy-agent: 7.0.2
+      lru-cache: 7.18.3
+      pac-proxy-agent: 7.0.1
+      proxy-from-env: 1.1.0
+      socks-proxy-agent: 8.0.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /proxy-agent@6.3.1:
+    resolution: {integrity: sha512-Rb5RVBy1iyqOtNl15Cw/llpeLH8bsb37gM1FUfKQ+Wck6xHlbAhWGUFiTRHtkjqGTA5pSHz6+0hrPW/oECihPQ==}
+    engines: {node: '>= 14'}
+    dependencies:
+      agent-base: 7.1.0
+      debug: 4.3.4
+      http-proxy-agent: 7.0.0
+      https-proxy-agent: 7.0.2
+      lru-cache: 7.18.3
+      pac-proxy-agent: 7.0.1
+      proxy-from-env: 1.1.0
+      socks-proxy-agent: 8.0.2
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /proxy-from-env@1.1.0:
+    resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==}
+    dev: true
+
+  /pump@3.0.0:
+    resolution: {integrity: sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==}
+    dependencies:
+      end-of-stream: 1.4.4
+      once: 1.4.0
+    dev: true
+
+  /punycode@2.3.1:
+    resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
+    engines: {node: '>=6'}
+
+  /puppeteer-core@20.9.0(typescript@5.3.2):
+    resolution: {integrity: sha512-H9fYZQzMTRrkboEfPmf7m3CLDN6JvbxXA3qTtS+dFt27tR+CsFHzPsT6pzp6lYL6bJbAPaR0HaPO6uSi+F94Pg==}
+    engines: {node: '>=16.3.0'}
+    peerDependencies:
+      typescript: '>= 4.7.4'
+    peerDependenciesMeta:
+      typescript:
+        optional: true
+    dependencies:
+      '@puppeteer/browsers': 1.4.6(typescript@5.3.2)
+      chromium-bidi: 0.4.16(devtools-protocol@0.0.1147663)
+      cross-fetch: 4.0.0
+      debug: 4.3.4
+      devtools-protocol: 0.0.1147663
+      typescript: 5.3.2
+      ws: 8.13.0
+    transitivePeerDependencies:
+      - bufferutil
+      - encoding
+      - supports-color
+      - utf-8-validate
+    dev: true
+
+  /query-selector-shadow-dom@1.0.1:
+    resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==}
+    dev: true
+
+  /queue-microtask@1.2.3:
+    resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
+
+  /queue-tick@1.0.1:
+    resolution: {integrity: sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag==}
+    dev: true
+
+  /quick-lru@5.1.1:
+    resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /react-is@17.0.2:
+    resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==}
+    dev: true
+
+  /readable-stream@2.3.8:
+    resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==}
+    dependencies:
+      core-util-is: 1.0.3
+      inherits: 2.0.4
+      isarray: 1.0.0
+      process-nextick-args: 2.0.1
+      safe-buffer: 5.1.2
+      string_decoder: 1.1.1
+      util-deprecate: 1.0.2
+    dev: true
+
+  /readable-stream@3.6.2:
+    resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==}
+    engines: {node: '>= 6'}
+    dependencies:
+      inherits: 2.0.4
+      string_decoder: 1.3.0
+      util-deprecate: 1.0.2
+    dev: true
+
+  /readdir-glob@1.1.3:
+    resolution: {integrity: sha512-v05I2k7xN8zXvPD9N+z/uhXPaj0sUFCe2rcWZIpBsqxfP7xXFQ0tipAd/wjj1YxWyWtUS5IDJpOG82JKt2EAVA==}
+    dependencies:
+      minimatch: 5.1.6
+    dev: true
+
+  /readdirp@3.6.0:
+    resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==}
+    engines: {node: '>=8.10.0'}
+    dependencies:
+      picomatch: 2.3.1
+    dev: true
+
+  /require-directory@2.1.1:
+    resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /resolve-alpn@1.2.1:
+    resolution: {integrity: sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==}
+    dev: true
+
+  /resolve-from@4.0.0:
+    resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
+    engines: {node: '>=4'}
+    dev: false
+
+  /resolve-from@5.0.0:
+    resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==}
+    engines: {node: '>=8'}
+    dev: true
+
+  /resolve@1.22.8:
+    resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==}
+    hasBin: true
+    dependencies:
+      is-core-module: 2.13.1
+      path-parse: 1.0.7
+      supports-preserve-symlinks-flag: 1.0.0
+
+  /responselike@3.0.0:
+    resolution: {integrity: sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==}
+    engines: {node: '>=14.16'}
+    dependencies:
+      lowercase-keys: 3.0.0
+    dev: true
+
+  /resq@1.11.0:
+    resolution: {integrity: sha512-G10EBz+zAAy3zUd/CDoBbXRL6ia9kOo3xRHrMDsHljI0GDkhYlyjwoCx5+3eCC4swi1uCoZQhskuJkj7Gp57Bw==}
+    dependencies:
+      fast-deep-equal: 2.0.1
+    dev: true
+
+  /reusify@1.0.4:
+    resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==}
+    engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+
+  /rgb2hex@0.2.5:
+    resolution: {integrity: sha512-22MOP1Rh7sAo1BZpDG6R5RFYzR2lYEgwq7HEmyW2qcsOqR2lQKmn+O//xV3YG/0rrhMC6KVX2hU+ZXuaw9a5bw==}
+    dev: true
+
+  /rimraf@2.7.1:
+    resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==}
+    hasBin: true
+    dependencies:
+      glob: 7.2.3
+    dev: true
+
+  /rimraf@3.0.2:
+    resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==}
+    hasBin: true
+    dependencies:
+      glob: 7.2.3
+    dev: false
+
+  /rollup-plugin-inject@3.0.2:
+    resolution: {integrity: sha512-ptg9PQwzs3orn4jkgXJ74bfs5vYz1NCZlSQMBUA0wKcGp5i5pA1AO3fOUEte8enhGUC+iapTCzEWw2jEFFUO/w==}
+    deprecated: This package has been deprecated and is no longer maintained. Please use @rollup/plugin-inject.
+    dependencies:
+      estree-walker: 0.6.1
+      magic-string: 0.25.9
+      rollup-pluginutils: 2.8.2
+    dev: true
+
+  /rollup-plugin-node-polyfills@0.2.1:
+    resolution: {integrity: sha512-4kCrKPTJ6sK4/gLL/U5QzVT8cxJcofO0OU74tnB19F40cmuAKSzH5/siithxlofFEjwvw1YAhPmbvGNA6jEroA==}
+    dependencies:
+      rollup-plugin-inject: 3.0.2
+    dev: true
+
+  /rollup-pluginutils@2.8.2:
+    resolution: {integrity: sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==}
+    dependencies:
+      estree-walker: 0.6.1
+    dev: true
+
+  /rollup@2.79.1:
+    resolution: {integrity: sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==}
+    engines: {node: '>=10.0.0'}
+    hasBin: true
+    optionalDependencies:
+      fsevents: 2.3.3
+    dev: true
+
+  /rollup@3.29.4:
+    resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==}
+    engines: {node: '>=14.18.0', npm: '>=8.0.0'}
+    hasBin: true
+    optionalDependencies:
+      fsevents: 2.3.3
+
+  /run-parallel@1.2.0:
+    resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
+    dependencies:
+      queue-microtask: 1.2.3
+
+  /safaridriver@0.1.0:
+    resolution: {integrity: sha512-azzzIP3gR1TB9bVPv7QO4Zjw0rR1BWEU/s2aFdUMN48gxDjxEB13grAEuXDmkKPgE74cObymDxmAmZnL3clj4w==}
+    dev: true
+
+  /safe-buffer@5.1.2:
+    resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==}
+    dev: true
+
+  /safe-buffer@5.2.1:
+    resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==}
+    dev: true
+
+  /semver@7.5.4:
+    resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==}
+    engines: {node: '>=10'}
+    hasBin: true
+    dependencies:
+      lru-cache: 6.0.0
+
+  /serialize-error@11.0.3:
+    resolution: {integrity: sha512-2G2y++21dhj2R7iHAdd0FIzjGwuKZld+7Pl/bTU6YIkrC2ZMbVUjm+luj6A6V34Rv9XfKJDKpTWu9W4Gse1D9g==}
+    engines: {node: '>=14.16'}
+    dependencies:
+      type-fest: 2.19.0
+    dev: true
+
+  /setimmediate@1.0.5:
+    resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==}
+    dev: true
+
+  /shebang-command@2.0.0:
+    resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==}
+    engines: {node: '>=8'}
+    dependencies:
+      shebang-regex: 3.0.0
+
+  /shebang-regex@3.0.0:
+    resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==}
+    engines: {node: '>=8'}
+
+  /siginfo@2.0.0:
+    resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==}
+    dev: true
+
+  /signal-exit@3.0.7:
+    resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==}
+    dev: true
+
+  /signal-exit@4.1.0:
+    resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
+    engines: {node: '>=14'}
+    dev: true
+
+  /sirv@2.0.3:
+    resolution: {integrity: sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==}
+    engines: {node: '>= 10'}
+    dependencies:
+      '@polka/url': 1.0.0-next.23
+      mrmime: 1.0.1
+      totalist: 3.0.1
+    dev: true
+
+  /slash@3.0.0:
+    resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==}
+    engines: {node: '>=8'}
+
+  /slice-ansi@5.0.0:
+    resolution: {integrity: sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      ansi-styles: 6.2.1
+      is-fullwidth-code-point: 4.0.0
+    dev: true
+
+  /smart-buffer@4.2.0:
+    resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==}
+    engines: {node: '>= 6.0.0', npm: '>= 3.0.0'}
+    dev: true
+
+  /socks-proxy-agent@8.0.2:
+    resolution: {integrity: sha512-8zuqoLv1aP/66PHF5TqwJ7Czm3Yv32urJQHrVyhD7mmA6d61Zv8cIXQYPTWwmg6qlupnPvs/QKDmfa4P/qct2g==}
+    engines: {node: '>= 14'}
+    dependencies:
+      agent-base: 7.1.0
+      debug: 4.3.4
+      socks: 2.7.1
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /socks@2.7.1:
+    resolution: {integrity: sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==}
+    engines: {node: '>= 10.13.0', npm: '>= 3.0.0'}
+    dependencies:
+      ip: 2.0.0
+      smart-buffer: 4.2.0
+    dev: true
+
+  /source-map-js@1.0.2:
+    resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==}
+    engines: {node: '>=0.10.0'}
+
+  /source-map@0.6.1:
+    resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==}
+    engines: {node: '>=0.10.0'}
+    dev: true
+
+  /source-map@0.8.0-beta.0:
+    resolution: {integrity: sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==}
+    engines: {node: '>= 8'}
+    dependencies:
+      whatwg-url: 7.1.0
+    dev: true
+
+  /sourcemap-codec@1.4.8:
+    resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==}
+    deprecated: Please use @jridgewell/sourcemap-codec instead
+    dev: true
+
+  /split2@4.2.0:
+    resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==}
+    engines: {node: '>= 10.x'}
+    dev: true
+
+  /stackback@0.0.2:
+    resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
+    dev: true
+
+  /std-env@3.5.0:
+    resolution: {integrity: sha512-JGUEaALvL0Mf6JCfYnJOTcobY+Nc7sG/TemDRBqCA0wEr4DER7zDchaaixTlmOxAjG1uRJmX82EQcxwTQTkqVA==}
+    dev: true
+
+  /streamx@2.15.5:
+    resolution: {integrity: sha512-9thPGMkKC2GctCzyCUjME3yR03x2xNo0GPKGkRw2UMYN+gqWa9uqpyNWhmsNCutU5zHmkUum0LsCRQTXUgUCAg==}
+    dependencies:
+      fast-fifo: 1.3.2
+      queue-tick: 1.0.1
+    dev: true
+
+  /string-width@4.2.3:
+    resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
+    engines: {node: '>=8'}
+    dependencies:
+      emoji-regex: 8.0.0
+      is-fullwidth-code-point: 3.0.0
+      strip-ansi: 6.0.1
+    dev: true
+
+  /string-width@5.1.2:
+    resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
+    engines: {node: '>=12'}
+    dependencies:
+      eastasianwidth: 0.2.0
+      emoji-regex: 9.2.2
+      strip-ansi: 7.1.0
+    dev: true
+
+  /string_decoder@1.1.1:
+    resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==}
+    dependencies:
+      safe-buffer: 5.1.2
+    dev: true
+
+  /string_decoder@1.3.0:
+    resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==}
+    dependencies:
+      safe-buffer: 5.2.1
+    dev: true
+
+  /strip-ansi@6.0.1:
+    resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
+    engines: {node: '>=8'}
+    dependencies:
+      ansi-regex: 5.0.1
+
+  /strip-ansi@7.1.0:
+    resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      ansi-regex: 6.0.1
+    dev: true
+
+  /strip-final-newline@2.0.0:
+    resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /strip-json-comments@3.1.1:
+    resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==}
+    engines: {node: '>=8'}
+    dev: false
+
+  /strip-literal@1.3.0:
+    resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==}
+    dependencies:
+      acorn: 8.11.2
+    dev: true
+
+  /sucrase@3.34.0:
+    resolution: {integrity: sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==}
+    engines: {node: '>=8'}
+    hasBin: true
+    dependencies:
+      '@jridgewell/gen-mapping': 0.3.3
+      commander: 4.1.1
+      glob: 7.1.6
+      lines-and-columns: 1.2.4
+      mz: 2.7.0
+      pirates: 4.0.6
+      ts-interface-checker: 0.1.13
+    dev: true
+
+  /supports-color@7.2.0:
+    resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==}
+    engines: {node: '>=8'}
+    dependencies:
+      has-flag: 4.0.0
+
+  /supports-preserve-symlinks-flag@1.0.0:
+    resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==}
+    engines: {node: '>= 0.4'}
+
+  /svelte-eslint-parser@0.33.1(svelte@4.2.7):
+    resolution: {integrity: sha512-vo7xPGTlKBGdLH8T5L64FipvTrqv3OQRx9d2z5X05KKZDlF4rQk8KViZO4flKERY+5BiVdOh7zZ7JGJWo5P0uA==}
+    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
+    peerDependencies:
+      svelte: ^3.37.0 || ^4.0.0
+    peerDependenciesMeta:
+      svelte:
+        optional: true
+    dependencies:
+      eslint-scope: 7.2.2
+      eslint-visitor-keys: 3.4.3
+      espree: 9.6.1
+      postcss: 8.4.31
+      postcss-scss: 4.0.9(postcss@8.4.31)
+      svelte: 4.2.7
+    dev: false
+
+  /svelte@4.2.7:
+    resolution: {integrity: sha512-UExR1KS7raTdycsUrKLtStayu4hpdV3VZQgM0akX8XbXgLBlosdE/Sf3crOgyh9xIjqSYB3UEBuUlIQKRQX2hg==}
+    engines: {node: '>=16'}
+    dependencies:
+      '@ampproject/remapping': 2.2.1
+      '@jridgewell/sourcemap-codec': 1.4.15
+      '@jridgewell/trace-mapping': 0.3.20
+      acorn: 8.11.2
+      aria-query: 5.3.0
+      axobject-query: 3.2.1
+      code-red: 1.0.4
+      css-tree: 2.3.1
+      estree-walker: 3.0.3
+      is-reference: 3.0.2
+      locate-character: 3.0.0
+      magic-string: 0.30.5
+      periscopic: 3.1.0
+    dev: false
+
+  /tar-fs@3.0.4:
+    resolution: {integrity: sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==}
+    dependencies:
+      mkdirp-classic: 0.5.3
+      pump: 3.0.0
+      tar-stream: 3.1.6
+    dev: true
+
+  /tar-stream@3.1.6:
+    resolution: {integrity: sha512-B/UyjYwPpMBv+PaFSWAmtYjwdrlEaZQEhMIBFNC5oEG8lpiW8XjcSdmEaClj28ArfKScKHs2nshz3k2le6crsg==}
+    dependencies:
+      b4a: 1.6.4
+      fast-fifo: 1.3.2
+      streamx: 2.15.5
+    dev: true
+
+  /text-table@0.2.0:
+    resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
+    dev: false
+
+  /thenify-all@1.6.0:
+    resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==}
+    engines: {node: '>=0.8'}
+    dependencies:
+      thenify: 3.3.1
+    dev: true
+
+  /thenify@3.3.1:
+    resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==}
+    dependencies:
+      any-promise: 1.3.0
+    dev: true
+
+  /through@2.3.8:
+    resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==}
+    dev: true
+
+  /tinybench@2.5.1:
+    resolution: {integrity: sha512-65NKvSuAVDP/n4CqH+a9w2kTlLReS9vhsAP06MWx+/89nMinJyB2icyl58RIcqCmIggpojIGeuJGhjU1aGMBSg==}
+    dev: true
+
+  /tinypool@0.4.0:
+    resolution: {integrity: sha512-2ksntHOKf893wSAH4z/+JbPpi92esw8Gn9N2deXX+B0EO92hexAVI9GIZZPx7P5aYo5KULfeOSt3kMOmSOy6uA==}
+    engines: {node: '>=14.0.0'}
+    dev: true
+
+  /tinyspy@1.1.1:
+    resolution: {integrity: sha512-UVq5AXt/gQlti7oxoIg5oi/9r0WpF7DGEVwXgqWSMmyN16+e3tl5lIvTaOpJ3TAtu5xFzWccFRM4R5NaWHF+4g==}
+    engines: {node: '>=14.0.0'}
+    dev: true
+
+  /to-regex-range@5.0.1:
+    resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==}
+    engines: {node: '>=8.0'}
+    dependencies:
+      is-number: 7.0.0
+
+  /totalist@3.0.1:
+    resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==}
+    engines: {node: '>=6'}
+    dev: true
+
+  /tr46@0.0.3:
+    resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
+    dev: true
+
+  /tr46@1.0.1:
+    resolution: {integrity: sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==}
+    dependencies:
+      punycode: 2.3.1
+    dev: true
+
+  /traverse@0.3.9:
+    resolution: {integrity: sha512-iawgk0hLP3SxGKDfnDJf8wTz4p2qImnyihM5Hh/sGvQ3K37dPi/w8sRhdNIxYA1TwFwc5mDhIJq+O0RsvXBKdQ==}
+    dev: true
+
+  /tree-kill@1.2.2:
+    resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==}
+    hasBin: true
+    dev: true
+
+  /ts-interface-checker@0.1.13:
+    resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==}
+    dev: true
+
+  /ts-node@10.9.1(@types/node@20.9.3)(typescript@5.3.2):
+    resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==}
+    hasBin: true
+    peerDependencies:
+      '@swc/core': '>=1.2.50'
+      '@swc/wasm': '>=1.2.50'
+      '@types/node': '*'
+      typescript: '>=2.7'
+    peerDependenciesMeta:
+      '@swc/core':
+        optional: true
+      '@swc/wasm':
+        optional: true
+    dependencies:
+      '@cspotcode/source-map-support': 0.8.1
+      '@tsconfig/node10': 1.0.9
+      '@tsconfig/node12': 1.0.11
+      '@tsconfig/node14': 1.0.3
+      '@tsconfig/node16': 1.0.4
+      '@types/node': 20.9.3
+      acorn: 8.11.2
+      acorn-walk: 8.3.0
+      arg: 4.1.3
+      create-require: 1.1.1
+      diff: 4.0.2
+      make-error: 1.3.6
+      typescript: 5.3.2
+      v8-compile-cache-lib: 3.0.1
+      yn: 3.1.1
+
+  /tslib@1.14.1:
+    resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==}
+    dev: false
+
+  /tslib@2.6.2:
+    resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==}
+    dev: true
+
+  /tsup@6.7.0(postcss@8.4.31)(ts-node@10.9.1)(typescript@5.3.2):
+    resolution: {integrity: sha512-L3o8hGkaHnu5TdJns+mCqFsDBo83bJ44rlK7e6VdanIvpea4ArPcU3swWGsLVbXak1PqQx/V+SSmFPujBK+zEQ==}
+    engines: {node: '>=14.18'}
+    hasBin: true
+    peerDependencies:
+      '@swc/core': ^1
+      postcss: ^8.4.12
+      typescript: '>=4.1.0'
+    peerDependenciesMeta:
+      '@swc/core':
+        optional: true
+      postcss:
+        optional: true
+      typescript:
+        optional: true
+    dependencies:
+      bundle-require: 4.0.2(esbuild@0.17.19)
+      cac: 6.7.14
+      chokidar: 3.5.3
+      debug: 4.3.4
+      esbuild: 0.17.19
+      execa: 5.1.1
+      globby: 11.1.0
+      joycon: 3.1.1
+      postcss: 8.4.31
+      postcss-load-config: 3.1.4(postcss@8.4.31)(ts-node@10.9.1)
+      resolve-from: 5.0.0
+      rollup: 3.29.4
+      source-map: 0.8.0-beta.0
+      sucrase: 3.34.0
+      tree-kill: 1.2.2
+      typescript: 5.3.2
+    transitivePeerDependencies:
+      - supports-color
+      - ts-node
+    dev: true
+
+  /tsutils@3.21.0(typescript@5.3.2):
+    resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==}
+    engines: {node: '>= 6'}
+    peerDependencies:
+      typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta'
+    dependencies:
+      tslib: 1.14.1
+      typescript: 5.3.2
+    dev: false
+
+  /type-check@0.4.0:
+    resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==}
+    engines: {node: '>= 0.8.0'}
+    dependencies:
+      prelude-ls: 1.2.1
+    dev: false
+
+  /type-detect@4.0.8:
+    resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==}
+    engines: {node: '>=4'}
+    dev: true
+
+  /type-fest@0.20.2:
+    resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==}
+    engines: {node: '>=10'}
+    dev: false
+
+  /type-fest@2.13.0:
+    resolution: {integrity: sha512-lPfAm42MxE4/456+QyIaaVBAwgpJb6xZ8PRu09utnhPdWwcyj9vgy6Sq0Z5yNbJ21EdxB5dRU/Qg8bsyAMtlcw==}
+    engines: {node: '>=12.20'}
+    dev: true
+
+  /type-fest@2.19.0:
+    resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==}
+    engines: {node: '>=12.20'}
+    dev: true
+
+  /typescript@5.3.2:
+    resolution: {integrity: sha512-6l+RyNy7oAHDfxC4FzSJcz9vnjTKxrLpDG5M2Vu4SHRVNg6xzqZp6LYSR9zjqQTu8DU/f5xwxUdADOkbrIX2gQ==}
+    engines: {node: '>=14.17'}
+    hasBin: true
+
+  /ufo@1.3.2:
+    resolution: {integrity: sha512-o+ORpgGwaYQXgqGDwd+hkS4PuZ3QnmqMMxRuajK/a38L6fTpcE5GPIfrf+L/KemFzfUpeUQc1rRS1iDBozvnFA==}
+    dev: true
+
+  /unbzip2-stream@1.4.3:
+    resolution: {integrity: sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==}
+    dependencies:
+      buffer: 5.7.1
+      through: 2.3.8
+    dev: true
+
+  /undici-types@5.26.5:
+    resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
+
+  /universalify@0.1.2:
+    resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==}
+    engines: {node: '>= 4.0.0'}
+    dev: true
+
+  /unzipper@0.10.14:
+    resolution: {integrity: sha512-ti4wZj+0bQTiX2KmKWuwj7lhV+2n//uXEotUmGuQqrbVZSEGFMbI68+c6JCQ8aAmUWYvtHEz2A8K6wXvueR/6g==}
+    dependencies:
+      big-integer: 1.6.52
+      binary: 0.3.0
+      bluebird: 3.4.7
+      buffer-indexof-polyfill: 1.0.2
+      duplexer2: 0.1.4
+      fstream: 1.0.12
+      graceful-fs: 4.2.11
+      listenercount: 1.0.1
+      readable-stream: 2.3.8
+      setimmediate: 1.0.5
+    dev: true
+
+  /uri-js@4.4.1:
+    resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
+    dependencies:
+      punycode: 2.3.1
+    dev: false
+
+  /userhome@1.0.0:
+    resolution: {integrity: sha512-ayFKY3H+Pwfy4W98yPdtH1VqH4psDeyW8lYYFzfecR9d6hqLpqhecktvYR3SEEXt7vG0S1JEpciI3g94pMErig==}
+    engines: {node: '>= 0.8.0'}
+    dev: true
+
+  /util-deprecate@1.0.2:
+    resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
+
+  /v8-compile-cache-lib@3.0.1:
+    resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==}
+
+  /vite-node@0.29.8(@types/node@20.9.3):
+    resolution: {integrity: sha512-b6OtCXfk65L6SElVM20q5G546yu10/kNrhg08afEoWlFRJXFq9/6glsvSVY+aI6YeC1tu2TtAqI2jHEQmOmsFw==}
+    engines: {node: '>=v14.16.0'}
+    hasBin: true
+    dependencies:
+      cac: 6.7.14
+      debug: 4.3.4
+      mlly: 1.4.2
+      pathe: 1.1.1
+      picocolors: 1.0.0
+      vite: 4.1.4(@types/node@20.9.3)
+    transitivePeerDependencies:
+      - '@types/node'
+      - less
+      - sass
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+    dev: true
+
+  /vite@4.1.4(@types/node@20.9.3):
+    resolution: {integrity: sha512-3knk/HsbSTKEin43zHu7jTwYWv81f8kgAL99G5NWBcA1LKvtvcVAC4JjBH1arBunO9kQka+1oGbrMKOjk4ZrBg==}
+    engines: {node: ^14.18.0 || >=16.0.0}
+    hasBin: true
+    peerDependencies:
+      '@types/node': '>= 14'
+      less: '*'
+      sass: '*'
+      stylus: '*'
+      sugarss: '*'
+      terser: ^5.4.0
+    peerDependenciesMeta:
+      '@types/node':
+        optional: true
+      less:
+        optional: true
+      sass:
+        optional: true
+      stylus:
+        optional: true
+      sugarss:
+        optional: true
+      terser:
+        optional: true
+    dependencies:
+      '@types/node': 20.9.3
+      esbuild: 0.16.17
+      postcss: 8.4.31
+      resolve: 1.22.8
+      rollup: 3.29.4
+    optionalDependencies:
+      fsevents: 2.3.3
+
+  /vitest@0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4):
+    resolution: {integrity: sha512-JIAVi2GK5cvA6awGpH0HvH/gEG9PZ0a/WoxdiV3PmqK+3CjQMf8c+J/Vhv4mdZ2nRyXFw66sAg6qz7VNkaHfDQ==}
+    engines: {node: '>=v14.16.0'}
+    hasBin: true
+    peerDependencies:
+      '@edge-runtime/vm': '*'
+      '@vitest/browser': '*'
+      '@vitest/ui': '*'
+      happy-dom: '*'
+      jsdom: '*'
+      playwright: '*'
+      safaridriver: '*'
+      webdriverio: '*'
+    peerDependenciesMeta:
+      '@edge-runtime/vm':
+        optional: true
+      '@vitest/browser':
+        optional: true
+      '@vitest/ui':
+        optional: true
+      happy-dom:
+        optional: true
+      jsdom:
+        optional: true
+      playwright:
+        optional: true
+      safaridriver:
+        optional: true
+      webdriverio:
+        optional: true
+    dependencies:
+      '@types/chai': 4.3.11
+      '@types/chai-subset': 1.3.5
+      '@types/node': 20.9.3
+      '@vitest/browser': 0.29.8(vitest@0.29.8)
+      '@vitest/expect': 0.29.8
+      '@vitest/runner': 0.29.8
+      '@vitest/spy': 0.29.8
+      '@vitest/utils': 0.29.8
+      acorn: 8.11.2
+      acorn-walk: 8.3.0
+      cac: 6.7.14
+      chai: 4.3.10
+      debug: 4.3.4
+      local-pkg: 0.4.3
+      pathe: 1.1.1
+      picocolors: 1.0.0
+      source-map: 0.6.1
+      std-env: 3.5.0
+      strip-literal: 1.3.0
+      tinybench: 2.5.1
+      tinypool: 0.4.0
+      tinyspy: 1.1.1
+      vite: 4.1.4(@types/node@20.9.3)
+      vite-node: 0.29.8(@types/node@20.9.3)
+      webdriverio: 8.23.4(typescript@5.3.2)
+      why-is-node-running: 2.2.2
+    transitivePeerDependencies:
+      - less
+      - sass
+      - stylus
+      - sugarss
+      - supports-color
+      - terser
+    dev: true
+
+  /wait-port@1.1.0:
+    resolution: {integrity: sha512-3e04qkoN3LxTMLakdqeWth8nih8usyg+sf1Bgdf9wwUkp05iuK1eSY/QpLvscT/+F/gA89+LpUmmgBtesbqI2Q==}
+    engines: {node: '>=10'}
+    hasBin: true
+    dependencies:
+      chalk: 4.1.2
+      commander: 9.5.0
+      debug: 4.3.4
+    transitivePeerDependencies:
+      - supports-color
+    dev: true
+
+  /web-streams-polyfill@3.2.1:
+    resolution: {integrity: sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==}
+    engines: {node: '>= 8'}
+    dev: true
+
+  /webdriver@8.23.1:
+    resolution: {integrity: sha512-0PLN6cqP5cSorZBU2OBk2XKhxKpWWKzvClHBiGCqZIuofZ3kPTq7uYFapej0c4xFmKXHEiLIN7Qkt4H3gWTs8g==}
+    engines: {node: ^16.13 || >=18}
+    dependencies:
+      '@types/node': 20.9.3
+      '@types/ws': 8.5.10
+      '@wdio/config': 8.23.1
+      '@wdio/logger': 8.16.17
+      '@wdio/protocols': 8.23.0
+      '@wdio/types': 8.23.1
+      '@wdio/utils': 8.23.1
+      deepmerge-ts: 5.1.0
+      got: 12.6.1
+      ky: 0.33.3
+      ws: 8.14.2
+    transitivePeerDependencies:
+      - bufferutil
+      - supports-color
+      - utf-8-validate
+    dev: true
+
+  /webdriverio@8.23.4(typescript@5.3.2):
+    resolution: {integrity: sha512-tlma460ls27zv5Z+WHZG99SJrgcIZi4jsFrZeCCPZTtspOvXoqImL7g6orJTOJXVMhqptkFZN16zHONuAoXV5Q==}
+    engines: {node: ^16.13 || >=18}
+    peerDependencies:
+      devtools: ^8.14.0
+    peerDependenciesMeta:
+      devtools:
+        optional: true
+    dependencies:
+      '@types/node': 20.9.3
+      '@wdio/config': 8.23.1
+      '@wdio/logger': 8.16.17
+      '@wdio/protocols': 8.23.0
+      '@wdio/repl': 8.23.1
+      '@wdio/types': 8.23.1
+      '@wdio/utils': 8.23.1
+      archiver: 6.0.1
+      aria-query: 5.3.0
+      css-shorthand-properties: 1.1.1
+      css-value: 0.0.1
+      devtools-protocol: 0.0.1213968
+      grapheme-splitter: 1.0.4
+      import-meta-resolve: 3.1.1
+      is-plain-obj: 4.1.0
+      lodash.clonedeep: 4.5.0
+      lodash.zip: 4.2.0
+      minimatch: 9.0.3
+      puppeteer-core: 20.9.0(typescript@5.3.2)
+      query-selector-shadow-dom: 1.0.1
+      resq: 1.11.0
+      rgb2hex: 0.2.5
+      serialize-error: 11.0.3
+      webdriver: 8.23.1
+    transitivePeerDependencies:
+      - bufferutil
+      - encoding
+      - supports-color
+      - typescript
+      - utf-8-validate
+    dev: true
+
+  /webidl-conversions@3.0.1:
+    resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}
+    dev: true
+
+  /webidl-conversions@4.0.2:
+    resolution: {integrity: sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==}
+    dev: true
+
+  /whatwg-url@5.0.0:
+    resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==}
+    dependencies:
+      tr46: 0.0.3
+      webidl-conversions: 3.0.1
+    dev: true
+
+  /whatwg-url@7.1.0:
+    resolution: {integrity: sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==}
+    dependencies:
+      lodash.sortby: 4.7.0
+      tr46: 1.0.1
+      webidl-conversions: 4.0.2
+    dev: true
+
+  /which@2.0.2:
+    resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==}
+    engines: {node: '>= 8'}
+    hasBin: true
+    dependencies:
+      isexe: 2.0.0
+
+  /which@4.0.0:
+    resolution: {integrity: sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==}
+    engines: {node: ^16.13.0 || >=18.0.0}
+    hasBin: true
+    dependencies:
+      isexe: 3.1.1
+    dev: true
+
+  /why-is-node-running@2.2.2:
+    resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==}
+    engines: {node: '>=8'}
+    hasBin: true
+    dependencies:
+      siginfo: 2.0.0
+      stackback: 0.0.2
+    dev: true
+
+  /wrap-ansi@7.0.0:
+    resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
+    engines: {node: '>=10'}
+    dependencies:
+      ansi-styles: 4.3.0
+      string-width: 4.2.3
+      strip-ansi: 6.0.1
+    dev: true
+
+  /wrap-ansi@8.1.0:
+    resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
+    engines: {node: '>=12'}
+    dependencies:
+      ansi-styles: 6.2.1
+      string-width: 5.1.2
+      strip-ansi: 7.1.0
+    dev: true
+
+  /wrappy@1.0.2:
+    resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
+
+  /ws@8.13.0:
+    resolution: {integrity: sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==}
+    engines: {node: '>=10.0.0'}
+    peerDependencies:
+      bufferutil: ^4.0.1
+      utf-8-validate: '>=5.0.2'
+    peerDependenciesMeta:
+      bufferutil:
+        optional: true
+      utf-8-validate:
+        optional: true
+    dev: true
+
+  /ws@8.14.2:
+    resolution: {integrity: sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==}
+    engines: {node: '>=10.0.0'}
+    peerDependencies:
+      bufferutil: ^4.0.1
+      utf-8-validate: '>=5.0.2'
+    peerDependenciesMeta:
+      bufferutil:
+        optional: true
+      utf-8-validate:
+        optional: true
+    dev: true
+
+  /y18n@5.0.8:
+    resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==}
+    engines: {node: '>=10'}
+    dev: true
+
+  /yallist@4.0.0:
+    resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==}
+
+  /yaml@1.10.2:
+    resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==}
+    engines: {node: '>= 6'}
+
+  /yargs-parser@21.1.1:
+    resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==}
+    engines: {node: '>=12'}
+    dev: true
+
+  /yargs@17.7.1:
+    resolution: {integrity: sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==}
+    engines: {node: '>=12'}
+    dependencies:
+      cliui: 8.0.1
+      escalade: 3.1.1
+      get-caller-file: 2.0.5
+      require-directory: 2.1.1
+      string-width: 4.2.3
+      y18n: 5.0.8
+      yargs-parser: 21.1.1
+    dev: true
+
+  /yargs@17.7.2:
+    resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==}
+    engines: {node: '>=12'}
+    dependencies:
+      cliui: 8.0.1
+      escalade: 3.1.1
+      get-caller-file: 2.0.5
+      require-directory: 2.1.1
+      string-width: 4.2.3
+      y18n: 5.0.8
+      yargs-parser: 21.1.1
+    dev: true
+
+  /yauzl@2.10.0:
+    resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==}
+    dependencies:
+      buffer-crc32: 0.2.13
+      fd-slicer: 1.1.0
+    dev: true
+
+  /yn@3.1.1:
+    resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==}
+    engines: {node: '>=6'}
+
+  /yocto-queue@0.1.0:
+    resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
+    engines: {node: '>=10'}
+    dev: false
+
+  /yocto-queue@1.0.0:
+    resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==}
+    engines: {node: '>=12.20'}
+    dev: true
+
+  /zip-stream@5.0.1:
+    resolution: {integrity: sha512-UfZ0oa0C8LI58wJ+moL46BDIMgCQbnsb+2PoiJYtonhBsMh2bq1eRBVkvjfVsqbEHd9/EgKPUuL9saSSsec8OA==}
+    engines: {node: '>= 12.0.0'}
+    dependencies:
+      archiver-utils: 4.0.1
+      compress-commons: 5.0.1
+      readable-stream: 3.6.2
+    dev: true