# Hugging Face Spaces page-header residue captured during scraping
# ("Spaces: Sleeping / Sleeping") — kept here as a comment so the file
# remains valid Python.
# 1. Import the required packages
from typing import Dict

import gradio as gr
import torch
from transformers import pipeline
# 2. Define function to use our model on given text
def food_not_food_classifier(text: str) -> Dict[str, float]:
    """Classify *text* with the fine-tuned BERT model hosted on the HF Hub.

    Args:
        text: Input string to classify.

    Returns:
        Mapping from each label name to its predicted score, e.g.
        {"LABEL_0": 0.97, "LABEL_1": 0.03} — the shape Gradio's
        Label component expects.
    """
    # NOTE(review): the pipeline is rebuilt (and the model re-loaded) on every
    # call; hoisting this to module level would make repeated inference much
    # faster. Kept inside to preserve the original behavior.
    classifier = pipeline(
        task="text-classification",
        # The model is already on the Hugging Face Hub, so the repo id suffices.
        model="devagonal/bert-f1-durga-muhammad-c",
        device="cuda" if torch.cuda.is_available() else "cpu",
        top_k=None,  # return scores for every label, not just the top-1
    )
    # The pipeline returns a list with one entry per input text; take the first.
    outputs = classifier(text)[0]
    # Reshape the list of {"label": ..., "score": ...} dicts into {label: score}.
    return {item["label"]: item["score"] for item in outputs}
# 3. Create a Gradio interface with details about our app
description = """
a text classifier to determine question class.
label0 = durga
label1 = muhammad
"""
demo = gr.Interface(
    fn=food_not_food_classifier,
    inputs="text",
    outputs=gr.Label(num_top_classes=2),  # show top 2 classes (that's all we have)
    title="Bert F1 Durga Muhammad c",
    description=description,
    examples=[["siapakah durga"],
              ["siapakah muhammad"]],
)
# 4. Launch the interface (only when run as a script, not on import)
if __name__ == "__main__":
    demo.launch()