import gradio as gr
import numpy as np
from huggingface_hub import from_pretrained_keras
from transformers import AutoTokenizer

# Load the fine-tuned Keras fake-news classifier and the COVID-Twitter-BERT
# tokenizer it was trained with.
m = from_pretrained_keras("sgonzalezsilot/FakeNews-Detection-Twitter-Thesis")
MODEL = "digitalepidemiologylab/covid-twitter-bert-v2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)


def bert_encode(tokenizer, data, maximum_length):
    """Tokenize a list of texts into padded input-id and attention-mask arrays."""
    input_ids = []
    attention_masks = []

    for text in data:
        encoded = tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=maximum_length,
            padding="max_length",  # replaces the deprecated pad_to_max_length=True
            truncation=True,
            return_attention_mask=True,
        )
        input_ids.append(encoded["input_ids"])
        attention_masks.append(encoded["attention_mask"])

    return np.array(input_ids), np.array(attention_masks)


def get_news(input_text):
    sentence_length = 110
    # bert_encode expects a list of texts; wrap the single input string so we
    # don't iterate over it character by character.
    input_ids, attention_masks = bert_encode(tokenizer, [input_text], sentence_length)
    # Return the raw model prediction as text; the output head's shape is not
    # specified here, so no further post-processing is applied.
    pred = m([input_ids, attention_masks])
    return str(pred)


iface = gr.Interface(
    fn=get_news,
    inputs="text",
    outputs="text",
    title="Fake News",
    description="",
)
iface.launch(inline=False)