import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from collections import Counter
from scipy.special import softmax
article_string = "Author: <a href=\"https://huggingface.co/ruanchaves\">Ruan Chaves Rodrigues</a>. Read more about our <a href=\"https://github.com/ruanchaves/eplm\">research on the evaluation of Portuguese language models</a>."

app_title = "Question Answering (Respostas a Perguntas)"

app_description = """
This app determines if an answer is appropriate for a question. You can either introduce your own sentences by filling in "Question" and "Answer" or click on one of the example pairs provided below.

(Este aplicativo determina se uma resposta é apropriada para uma pergunta. Você pode introduzir suas próprias frases preenchendo "Question" e "Answer" ou clicar em um dos exemplos de pares fornecidos abaixo.)
"""
app_examples = [
    ["Qual a montanha mais alta do mundo?", "Monte Everest é a montanha mais alta do mundo."],
    ["Quais as duas línguas mais faladas no mundo?", "Leonardo da Vinci pintou a Mona Lisa."],
    ["Qual a personagem mais famosa de Maurício de Sousa?", "A personagem mais famosa de Mauricio de Sousa é a Mônica."],
]
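# Note: the second example deliberately pairs a question with an unrelated
# answer, so it demonstrates the "Unsuitable" verdict; the other two pairs
# are suitable answers.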
output_textbox_component_description = """
Output will appear here once the app has finished analyzing the answer.

(A saída aparecerá aqui assim que o aplicativo terminar de analisar a resposta.)
"""

output_json_component_description = {
    "breakdown": """
This box presents a detailed breakdown of the evaluation for each model.
""",
    "detalhamento": """
(Esta caixa apresenta um detalhamento da avaliação para cada modelo.)
""",
}
short_score_descriptions = {
    0: "Unsuitable",
    1: "Suitable",
}

score_descriptions = {
    0: "Negative: The answer is not suitable for the provided question.",
    1: "Positive: The answer is suitable for the provided question.",
}

score_descriptions_pt = {
    0: "(Negativo: A resposta não é adequada para a pergunta fornecida.)",
    1: "(Positivo: A resposta é adequada para a pergunta fornecida.)",
}
model_list = [
    "ruanchaves/mdeberta-v3-base-faquad-nli",
    "ruanchaves/bert-base-portuguese-cased-faquad-nli",
    "ruanchaves/bert-large-portuguese-cased-faquad-nli",
]

user_friendly_name = {
    "ruanchaves/mdeberta-v3-base-faquad-nli": "mDeBERTa-v3 (FaQuAD)",
    "ruanchaves/bert-base-portuguese-cased-faquad-nli": "BERTimbau base (FaQuAD)",
    "ruanchaves/bert-large-portuguese-cased-faquad-nli": "BERTimbau large (FaQuAD)",
}

reverse_user_friendly_name = {v: k for k, v in user_friendly_name.items()}
user_friendly_name_list = list(user_friendly_name.values())
# Load every tokenizer/model pair once at startup so predictions can reuse them.
model_array = []
for model_name in model_list:
    row = {}
    row["name"] = model_name
    row["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
    row["model"] = AutoModelForSequenceClassification.from_pretrained(model_name)
    model_array.append(row)
def most_frequent(array):
    # Majority vote: return the element that occurs most often in the array.
    occurrence_count = Counter(array)
    return occurrence_count.most_common(1)[0][0]
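# For instance, most_frequent([1, 1, 0]) returns 1. This helper could back a
# majority vote across the three models, although predict() below only runs
# the single model selected in the dropdown.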
def predict(s1, s2, chosen_model):
    # Fall back to the first model when the dropdown arrives empty.
    if not chosen_model:
        chosen_model = user_friendly_name_list[0]
    full_chosen_model_name = reverse_user_friendly_name[chosen_model]
    for row in model_array:
        if row["name"] != full_chosen_model_name:
            continue
        tokenizer = row["tokenizer"]
        model = row["model"]
        # Encode the question and answer together as one sentence pair.
        model_input = tokenizer([s1], [s2], padding=True, return_tensors="pt")
        with torch.no_grad():
            output = model(**model_input)
        # Turn the two class logits into probabilities.
        logits = softmax(output[0][0].detach().numpy()).tolist()
        break

    def get_description(idx):
        description = score_descriptions[idx]
        description_pt = score_descriptions_pt[idx]
        return description + "\n \n" + description_pt

    max_pos = logits.index(max(logits))
    markdown_description = get_description(max_pos)
    scores = {short_score_descriptions[k]: v for k, v in enumerate(logits)}
    return scores, markdown_description
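# A quick smoke test of predict() outside the UI would look like the following
# (kept as a comment so it does not run at Space startup; the model name is the
# first dropdown choice defined above):
#
#   scores, verdict = predict(
#       "Qual a montanha mais alta do mundo?",
#       "Monte Everest é a montanha mais alta do mundo.",
#       "mDeBERTa-v3 (FaQuAD)",
#   )
#
# `scores` maps {"Unsuitable": p0, "Suitable": p1} and `verdict` holds the
# bilingual markdown description of the winning class.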
inputs = [
    gr.Textbox(label="Question", value=app_examples[0][0]),
    gr.Textbox(label="Answer", value=app_examples[0][1]),
    gr.Dropdown(label="Model", choices=user_friendly_name_list, value=user_friendly_name_list[0]),
]

outputs = [
    gr.Label(label="Result"),
    gr.Markdown(),
]
gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title=app_title,
    description=app_description,
    examples=app_examples,
    article=article_string,
).launch()
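# Once the Space is running, a gradio_client sketch along these lines should be
# able to call the app programmatically. The Space id below is a placeholder
# assumption, since this file does not name the Space it is deployed under:
#
#   from gradio_client import Client
#
#   client = Client("ruanchaves/faquad-nli")  # hypothetical Space id
#   result = client.predict(
#       "Qual a montanha mais alta do mundo?",
#       "Monte Everest é a montanha mais alta do mundo.",
#       "mDeBERTa-v3 (FaQuAD)",
#   )
#
# `result` bundles the two outputs of the interface: the label scores and the
# markdown verdict.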