import re

import gradio as gr
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
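
# Category labels for the classifier head. The order is assumed to match the
# model's id2label mapping (output logit i corresponds to labels[i]).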
labels = ['business', 'science', 'health', 'world', 'sport', 'politics', 'entertainment', 'tech']

# Load the fine-tuned model and its tokenizer from the Hugging Face Hub.
model_name = 'valurank/finetuned-distilbert-news-article-categorization'
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.eval()  # inference only: disable dropout


def read_in_text(path):
    """Read an article from a local text file (offline helper; not used by the app)."""
    with open(path, 'r') as file:
        article = file.read()
    return article
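
# Example (hypothetical local file):
#   article = read_in_text('sample_article.txt')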


def clean_text(text):
    """Normalize raw article text before classification."""
    # Drop any non-ASCII characters.
    text = text.encode('ascii', errors='ignore').decode('ascii')

    # Remove datelines such as "Date 12/31/2023" and timestamps
    # such as "10:30 AM ET".
    text = re.sub(r'Date\s\d{1,2}/\d{1,2}/\d{4}', '', text)
    text = re.sub(r'\d{1,2}:\d{2}\s[A-Z]+\s[A-Z]+', '', text)

    # Collapse newlines, tabs, and repeated spaces into single spaces.
    text = re.sub(r'\s+', ' ', text).strip()

    return text
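
# Example with a made-up snippet:
#   clean_text('Breaking\nNews\t10:30 AM ET Markets rallied.')
#   -> 'Breaking News Markets rallied.'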


def get_category(article):
    """Predict the news category of an article pasted into the app."""
    text = clean_text(article)

    # Tokenize, truncating to the model's maximum sequence length.
    input_tensor = tokenizer.encode(text, return_tensors='pt', truncation=True)

    # Score the article without tracking gradients.
    with torch.no_grad():
        logits = model(input_tensor).logits

    # Softmax over the class dimension, then take the most probable label.
    probs = torch.softmax(logits, dim=1)[0].numpy()
    category = labels[np.argmax(probs)]

    return category
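
# Example (made-up input): get_category('The central bank raised interest
# rates by a quarter point.') would be expected to return 'business'.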


demo = gr.Interface(
    fn=get_category,
    inputs=gr.Textbox(label='Paste your article here'),
    outputs='text',
    title='News Article Categorization',
)


if __name__ == '__main__':
    demo.launch(debug=True)
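
# Optional: to get a temporary public link, one could instead call
#   demo.launch(debug=True, share=True)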