import gradio as gr
from huggingface_hub import from_pretrained_keras
from transformers import AutoTokenizer
import numpy as np

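# Load the fine-tuned Keras fake-news classifier from the Hugging Face Hub.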
m = from_pretrained_keras('sgonzalezsilot/FakeNews-Detection-Twitter-Thesis')

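# Tokenizer for COVID-Twitter-BERT v2, presumably the base encoder the classifier was fine-tuned from.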
MODEL = "digitalepidemiologylab/covid-twitter-bert-v2" |
|
tokenizer = AutoTokenizer.from_pretrained(MODEL) |
|
|
|
def bert_encode(tokenizer, data, maximum_length):
    """Tokenize a list of texts into fixed-length input IDs and attention masks."""
    input_ids = []
    attention_masks = []

    for i in range(len(data)):
        encoded = tokenizer.encode_plus(
            data[i],
            add_special_tokens=True,
            max_length=maximum_length,
            padding='max_length',  # replaces the deprecated pad_to_max_length=True
            truncation=True,
            return_attention_mask=True,
        )
        input_ids.append(encoded['input_ids'])
        attention_masks.append(encoded['attention_mask'])

    return np.array(input_ids), np.array(attention_masks)


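# Classify a single tweet and return a human-readable label.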
def get_news(input_text):
    sentence_length = 110  # fixed sequence length, presumably matching training
    train_input_ids, train_attention_masks = bert_encode(tokenizer, [input_text], sentence_length)

    # The model returns a single score; round it to a 0/1 label.
    pred = m.predict([train_input_ids, train_attention_masks])
    pred = np.round(pred)
    pred = pred.flatten()

    if pred[0] == 1:
        result = "Fake News"
    else:
        result = "True News"
    return result


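# Minimal Gradio UI: one textbox in, one textbox out.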
tweet_input = gr.Textbox(label="Enter the tweet")
output = gr.Textbox(label="Result")

iface = gr.Interface(fn=get_news,
                     inputs=tweet_input,
                     outputs=output,
                     title='Fake News',
                     description="")

iface.launch(inline=False)
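
# Quick sanity check without launching the UI (example tweet is illustrative only):
# print(get_news("Breaking: scientists confirm chocolate cures COVID-19"))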