Spaces: Runtime error
File size: 1,511 Bytes
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from datasets import load_dataset
# Download and prepare SQuAD dataset (not used directly here)
squad = load_dataset("squad")
# Load the pre-trained summarization model (adjust model name as needed).
# NOTE: from_pretrained expects a Hugging Face Hub model ID or a local
# directory of saved Transformers files; it cannot load a fastai .pkl export,
# which is the likely cause of the runtime error.
model_name = "laptop_summarizer_1.pkl"  # Replace with a loadable summarization checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
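# A minimal fallback sketch (an assumption, not the author's model): any public
# seq2seq summarization checkpoint on the Hub loads with the same two calls and
# can be used to confirm the rest of the app works while the custom model is
# exported to a loadable format, e.g.:
#
#   tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
#   model = AutoModelForSeq2SeqLM.from_pretrained("sshleifer/distilbart-cnn-12-6")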
# Define a function to generate summaries using the model
def generate_summary(input_text):
    # Tokenize the input text
    inputs = tokenizer(input_text, return_tensors="pt")
    # Generate summary using the pre-trained model
    output = model.generate(**inputs)
    # Decode the generated tokens back to text
    summary_text = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    return summary_text
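# Optional sanity check (a sketch, not part of the original app): exercise
# generate_summary() once at startup, assuming model_name above points to a
# checkpoint that from_pretrained can actually load. The sample text below is
# illustrative only.
sample_text = (
    "I need a lightweight laptop with long battery life for travel, "
    "occasional photo editing, and web development."
)
print(generate_summary(sample_text))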
# Create a Gradio interface for the model
interface = gr.Interface(
    fn=generate_summary,   # The function to generate summaries
    inputs=gr.Textbox(),   # Input field for text (gr.inputs.Textbox is removed in recent Gradio versions)
    outputs=gr.Textbox(),  # Output field for the generated summary
    live=True,             # Whether to update results in real-time
    title="Laptop Guru",   # Title of the interface
    description="Enter your requirements & get valuable insight from Guru.",  # Description of the interface
)
# Start the Gradio app
interface.launch(inline=True)