import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("michellejieli/emotion_text_classifier")
model = AutoModelForSequenceClassification.from_pretrained("michellejieli/emotion_text_classifier")

# Function to classify emotions
def classify_emotion(text):
    # Tokenize the input and run a forward pass without tracking gradients
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # Convert logits to probabilities over the seven emotion classes
    probabilities = torch.nn.functional.softmax(logits, dim=-1)
    probs = probabilities[0].numpy()
    labels = ["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"]
    results = {label: float(prob) for label, prob in zip(labels, probs)}
    return results

# Gradio interface setup
iface = gr.Interface(
    fn=classify_emotion,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to analyze emotions", label="Input Text"),
    outputs=gr.Label(label="Emotion Probabilities"),
    title="Emotion Classifier",
    description="Enter a sentence and see the probabilities of different emotions."
)

if __name__ == "__main__":
    iface.launch()
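
# A minimal sketch of exercising classify_emotion directly, without the Gradio UI.
# The sample sentence and the label it maps to are illustrative assumptions, not
# guaranteed model outputs. Uncomment to print the scores from a plain Python run:
#
#     scores = classify_emotion("I can't believe we won the finals!")
#     for label, prob in sorted(scores.items(), key=lambda kv: kv[1], reverse=True):
#         print(f"{label}: {prob:.3f}")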