import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
from rotary_embedding_torch import RotaryEmbedding
import gradio as gr
import spaces

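# Extended context length in tokens; base GPT-2 was trained with a 1024-position window.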
max_length = 8192

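# Load the RoPE-finetuned checkpoint; the tokenizer is unchanged from base GPT-2.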
model_name = "archit11/gpt2-long-finetuned"
config = GPT2Config.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")

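# Position interpolation via rotary embeddings: interpolate_factor=4.0 compresses
# position indices by 4x so longer sequences land in the position range seen in training.
# dim=32 rotates half of each of GPT-2's 64-dimensional attention heads.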
rotary_emb = RotaryEmbedding(
    dim=32,
    interpolate_factor=4.0,
)

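# Attach the rotary embedding to every attention block. This assumes the
# fine-tuned model's attention forward actually consumes `rotary_emb`;
# stock GPT2Attention does not read this attribute on its own.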
for layer in model.transformer.h:
    layer.attn.rotary_emb = rotary_emb

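# Inference only: disable dropout.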
model.eval()

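# ZeroGPU: allocate a GPU for up to 120 seconds per call.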
@spaces.GPU(duration=120)
def generate_text(prompt, max_length=8192, temperature=0.7, top_p=0.9):
    # Run on the GPU when one has been allocated; fall back to CPU otherwise.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,  # total budget: prompt + generated tokens
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no dedicated pad token
        )

    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text

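# Gradio UI: prompt box plus sampling controls.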
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=5, label="Prompt"),
        gr.Slider(minimum=1, maximum=max_length, value=100, step=10, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p"),
    ],
    outputs=gr.Textbox(lines=10, label="Generated Text"),
    title="Long-Context GPT-2 Text Generation",
    description="GPT-2 with an extended context window via RoPE scaling, fine-tuned on LongAlpaca-12k. Prefer long inputs over long outputs: ZeroGPU times out before long generations complete. The Max Length slider counts input + output tokens."
)

iface.launch()