"""FredAlpaca.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1W6DsQPLinVnuJKqhVASYpuVwuHhhtGLc
"""

import streamlit as st

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import GenerationConfig

# Load the instruction-tuned FRED-T5 model and its tokenizer
# (assumes a CUDA-capable GPU is available).
model_name = "IlyaGusev/fred_t5_ru_turbo_alpaca"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to("cuda")
model.eval()

# Reuse the generation parameters shipped with the model checkpoint.
generation_config = GenerationConfig.from_pretrained(model_name)
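
# Note: Streamlit reruns the whole script on every interaction, so the load above
# repeats on each rerun. A sketch, assuming a Streamlit version that provides
# st.cache_resource, of caching the heavyweight objects instead (load_model is a
# hypothetical helper, not part of the original notebook):
#
#     @st.cache_resource
#     def load_model(name):
#         tok = AutoTokenizer.from_pretrained(name)
#         mdl = AutoModelForSeq2SeqLM.from_pretrained(name).to("cuda")
#         mdl.eval()
#         return tok, mdl
#
#     tokenizer, model = load_model(model_name)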

# Minimal Streamlit UI: a title, a text box for the prompt, and a start button.
st.title('Model')
st.write('')

text_in = st.text_input('Enter text:')

start = st.button('Start')
if start:
    # Tokenize the whole prompt (not character by character) and move the
    # tensors to the same device as the model.
    data = tokenizer(text_in, return_tensors="pt")
    data = {k: v.to(model.device) for k, v in data.items()}
    output_ids = model.generate(
        **data,
        generation_config=generation_config
    )[0]

    # Log the prompt and the raw generation to the server console.
    print(tokenizer.decode(data["input_ids"][0].tolist()))
    print(tokenizer.decode(output_ids.tolist()))
    print("====================")

    # Show the decoded prompt and the generated answer in the app.
    st.write("Result:",
             tokenizer.decode(data["input_ids"][0].tolist(), skip_special_tokens=True),
             tokenizer.decode(output_ids.tolist(), skip_special_tokens=True))