import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from rotary_embedding_torch import RotaryEmbedding
import gradio as gr
import spaces
# Maximum sequence length used during finetuning (also the generation cap below)
max_length = 8192
# Load the finetuned model from the Hugging Face Hub
model_name = "archit11/gpt2-long-finetuned"
model = GPT2LMHeadModel.from_pretrained(model_name)
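# The finetuned repo is assumed to reuse the stock GPT-2 vocabulary, so the
# tokenizer is loaded from the base model.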
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
# Recreate the RoPE-scaled rotary embeddings used for context extension
# (interpolate_factor=4.0 applies position interpolation; dim=32 rotates the
# first 32 dimensions of each attention head)
rotary_emb = RotaryEmbedding(
    dim=32,
    interpolate_factor=4.0,
)
for layer in model.transformer.h:
    layer.attn.rotary_emb = rotary_emb
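# NOTE: stock transformers GPT2Attention does not read a `rotary_emb`
# attribute; this attachment only takes effect if the attention forward is
# patched to apply it (assumed here to match the finetuning setup).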
# Set the model to evaluation mode
model.eval()
# Inference function; @spaces.GPU requests a ZeroGPU device for up to 120 s per call
@spaces.GPU(duration=120)
def generate_text(prompt, max_length=8192, temperature=0.7, top_p=0.9):
    # Use the GPU when one has been attached (ZeroGPU), otherwise fall back to CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
    # Generate text; note that max_length counts prompt tokens plus generated tokens
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
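# Optional local smoke test (hypothetical usage; outside Spaces the
# @spaces.GPU decorator is inert, so this runs on local hardware):
# print(generate_text("The model can be sanity-checked like this:", max_length=64))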
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=5, label="Prompt"),
        gr.Slider(minimum=1, maximum=8192, value=100, step=10, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p"),
    ],
    outputs=gr.Textbox(lines=10, label="Generated Text"),
    title="Long-Context GPT-2 Text Generation",
    description=(
        "GPT-2 with context extended via RoPE scaling, finetuned on LongAlpaca-12k. "
        "Prefer long inputs over long outputs: ZeroGPU times out before very long "
        "generations finish. The Max Length slider counts input plus output tokens."
    ),
)
# Launch the interface
iface.launch()