import streamlit as st
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
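
# Streamlit demo: classify an arXiv paper into one of 8 subject areas from its
# title and abstract. A pretrained bert2bert title-generation model provides
# sentence embeddings; a small MLP head (trained separately, weights in
# model.dat) turns them into class probabilities.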
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Linear(768, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 8),
        )

    def forward(self, x):
        return self.layer(x)
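
# A minimal shape-check sketch for the head above (hypothetical input, not part
# of the app): it maps one 768-dim pooled BERT embedding to 8 raw class scores.
#   x = torch.randn(768)   # stands in for a pooled sentence embedding
#   scores = Net()(x)      # scores.shape == torch.Size([8])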
@st.cache
def GetModel():
    model = Net()
    model.load_state_dict(torch.load('model.dat', map_location=torch.device('cpu')))
    return model
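
# st.cache keeps the loaded weights across Streamlit reruns; 'model.dat' is
# assumed to hold a state_dict matching Net, saved during training elsewhere.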
@st.cache(allow_output_mutation=True)
def GetModelAndTokenizer():
    model = GetModel()
    tokenizer = AutoTokenizer.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")
    model_emb = AutoModelForSeq2SeqLM.from_pretrained("Callidior/bert2bert-base-arxiv-titlegen")
    return model, tokenizer, model_emb
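
# allow_output_mutation=True likely matters here: it stops Streamlit from
# hashing the returned model/tokenizer objects on every rerun to detect
# mutation, which is slow (or fails outright) for large transformer objects.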
def BuildAnswer(txt):
    def get_hidden_states(encoded, model):
        with torch.no_grad():
            output = model(decoder_input_ids=encoded['input_ids'], output_hidden_states=True, **encoded)
        layers = [-4, -3, -2, -1]
        states = output['decoder_hidden_states']
        output = torch.stack([states[i] for i in layers]).sum(0).squeeze()
        return output.mean(dim=0)

    def get_word_vector(sent, tokenizer, model):
        encoded = tokenizer.encode_plus(sent, return_tensors="pt", truncation=True)
        return get_hidden_states(encoded, model)
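
    # Embedding strategy: run the seq2seq model with the input fed to both
    # encoder and decoder, sum the last four decoder hidden-state layers, then
    # mean-pool over tokens to get a single 768-dim sentence vector.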
    labels_articles = {
        1: "Computer Science",
        2: "Economics",
        3: "Electrical Engineering And Systems Science",
        4: "Mathematics",
        5: "Physics",
        6: "Quantitative Biology",
        7: "Quantitative Finance",
        8: "Statistics",
    }
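
    # Note the off-by-one mapping: classifier output index i corresponds to
    # labels_articles[i + 1].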
    txt = txt.strip()
    if txt == '':
        return []  # keep the return type consistent with the non-empty case

    model, tokenizer, model_emb = GetModelAndTokenizer()
    embed = get_word_vector(txt, tokenizer, model_emb)
    with torch.no_grad():
        probs = torch.nn.functional.softmax(model(embed), dim=0)
    best_tags = torch.argsort(probs, descending=True)

    total = 0.0  # don't shadow the built-in sum()
    result = []
    for tag in best_tags:
        if total > 0.95:
            break
        total += float(probs[tag.item()])
        percent = round(float(probs[tag.item()].cpu()) * 100)
        label = labels_articles[tag.item() + 1]
        result.append(f'{percent:3d}% - {label}')
    return result
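
# Rough usage sketch (hypothetical strings, not real model output):
#   BuildAnswer("Deep learning for ...") might return
#   [' 85% - Computer Science', ' 11% - Statistics'],
# i.e. classes in descending probability until the running total passes 0.95.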
st.markdown("### Hello, world!")
st.markdown("<img width=200px src='https://rozetked.me/images/uploads/dwoilp3BVjlE.jpg'>", unsafe_allow_html=True)
# ^-- you can show the user text, images, and a limited subset of HTML - everything works as in Jupyter
title = st.text_area("Title:")
abstract = st.text_area("Abstract:", height=400)
#from transformers import pipeline
#pipe = pipeline("ner", "Davlan/distilbert-base-multilingual-cased-ner-hrl")
#raw_predictions = pipe(text)
# here is the huggingface.transformers code you already know -- it can be swapped for anything from fairseq to catboost
result = BuildAnswer(title + ' ' + abstract)
for res in result:
    st.markdown(f"{res}")