# laptop_guru/app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load a pre-trained summarization model. facebook/bart-base is not fine-tuned
# for summarization; a checkpoint such as facebook/bart-large-cnn usually gives
# better summaries.
model_name = "facebook/bart-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Define a function that generates a summary of the input text
def generate_summary(input_text):
    # Tokenize the input, truncating to the model's maximum input length
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, max_length=1024)
    # Generate a summary with the pre-trained model
    output = model.generate(**inputs, max_new_tokens=128)
    # Decode the generated tokens back to text
    summary_text = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    return summary_text
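
# A minimal sanity check of generate_summary outside the Gradio UI might look
# like this (the sample text is illustrative, not part of the app):
#
#   sample = "I need a lightweight laptop with long battery life for travel."
#   print(generate_summary(sample))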
# Create the Gradio interface for the model
interface = gr.Interface(
    fn=generate_summary,   # Function that generates the summary
    inputs=gr.Textbox(),   # Input field for the user's text
    outputs=gr.Textbox(),  # Output field for the generated summary
    live=True,             # Update results as the user types
    title="Laptop Guru",
    description="Enter your requirements & get valuable insight from Guru.",
)
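
# Note: with live=True the model is re-run on every input change; removing
# live=True gives the default submit-button behavior, which is cheaper for a
# large model.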
# Start the Gradio app
interface.launch(inline=True)