"""Gradio demo that predicts the race category of a tweet's author
using a fine-tuned BERTweet classifier."""

import torch
from torch import nn
from transformers import AutoModel, AutoTokenizer
import gradio as gr

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class RaceClassifier(nn.Module):
    """BERTweet encoder with a dropout + linear classification head."""

    def __init__(self, n_classes):
        super().__init__()
        self.bert = AutoModel.from_pretrained("vinai/bertweet-base")
        self.drop = nn.Dropout(p=0.3)  # regularization; tune as needed
        # Linear head mapping the encoder's hidden size to the class logits.
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)

    def forward(self, input_ids, attention_mask):
        bert_output = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
        last_hidden_state = bert_output[0]
        # Use the first (<s>, CLS-equivalent) token as the sequence representation.
        pooled_output = last_hidden_state[:, 0]
        output = self.drop(pooled_output)
        return self.out(output)


labels = {
    0: "African American",
    1: "Asian",
    2: "Latin",
    3: "White",
}

# Load the tokenizer once at startup rather than on every prediction call.
tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", normalization=True)

model_race = RaceClassifier(n_classes=4)
# map_location ensures the checkpoint loads even when it was saved on a
# different device (e.g. GPU) than the one available here.
model_race.load_state_dict(torch.load('best_model_race.pt', map_location=device))
model_race.to(device)
model_race.eval()  # inference only; disables dropout
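
# The checkpoint is assumed to hold only the model weights, i.e. it was
# saved with something like this sketch (not part of this script):
#   torch.save(model.state_dict(), "best_model_race.pt")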


def predict(text):
    # Tokenize the single input; padding/truncation keep the tensor shape fixed.
    encoded = tokenizer(
        [text],
        padding=True,
        truncation=True,
        return_tensors='pt',
        max_length=128,
    )

    input_ids = encoded["input_ids"].to(device)
    attention_mask = encoded["attention_mask"].to(device)

    with torch.no_grad():
        outputs = model_race(input_ids, attention_mask)
        probs = torch.nn.functional.softmax(outputs, dim=1)
        prediction = torch.argmax(probs, dim=1).cpu().item()

    # Report each class probability, then the top prediction.
    output_string = ""
    for i, prob in enumerate(probs[0]):
        output_string += f"{labels[i]}: {prob.item() * 100:.2f}%\n"
    output_string += f"Predicted as: {labels[prediction]}"

    return output_string
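
# Hypothetical quick check from a Python shell, bypassing the web UI
# (the example tweet text is made up):
#   >>> print(predict("excited for the weekend!"))
#   African American: ...%
#   ...
#   Predicted as: ...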


demo = gr.Interface(
    fn=predict,
    inputs=["text"],
    outputs=["text"],
)

if __name__ == "__main__":
    demo.launch()
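
# launch() serves the app locally (default: http://127.0.0.1:7860); pass
# share=True for a temporary public link, e.g. demo.launch(share=True).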