Create app.py
app.py
ADDED
@@ -0,0 +1,27 @@
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the ViT5 checkpoint fine-tuned for Vietnamese news summarization
tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-large-vietnews-summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-large-vietnews-summarization")

def preprocess(inp):
    # Prepend the task prefix and tokenize the input document
    text = "summarize: " + inp + " </s>"
    features = tokenizer(text, return_tensors="pt")
    return features['input_ids'], features['attention_mask']

def predict(input_ids, attention_masks):
    outputs = model.generate(
        input_ids=input_ids, attention_mask=attention_masks,
        max_length=256,
        early_stopping=True,
    )
    # Decode the single generated sequence back into text
    res = tokenizer.batch_decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
    return res

if __name__ == '__main__':
    st.title("ViT5 News Abstractive Summarization (Vietnamese)")
    with st.container():
        txt = st.text_area('Enter long document...', ' ')
        inp_ids, attn_mask = preprocess(txt)
        st.write('Summary:', predict(inp_ids, attn_mask))
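Note that Streamlit re-executes the whole script on every widget interaction, so the two from_pretrained calls above re-instantiate the large checkpoint on each rerun. A minimal sketch of how the loading could be cached instead, assuming a recent Streamlit release that provides st.cache_resource (this decorator is not used by the commit itself):

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_NAME = "VietAI/vit5-large-vietnews-summarization"

@st.cache_resource
def load_model():
    # Load once, then reuse the same tokenizer/model objects across Streamlit reruns
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
    return tokenizer, model

tokenizer, model = load_model()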
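If this app.py is meant to run as a Streamlit Space, a companion requirements.txt would also be needed so the runtime can install the libraries imported above. The list below is an assumption about the dependencies, not part of this commit; sentencepiece is included on the assumption that the ViT5 tokenizer is SentencePiece-based, as T5-family tokenizers typically are:

# requirements.txt (assumed, not part of this commit)
transformers
torch
sentencepiece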