# HFCompareModel / app.py
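"""Gradio app that generates text for the same prompt with an original model and a
fine-tuned variant, so their outputs can be compared side by side."""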
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2Tokenizer
import torch
from huggingface_hub import login
import os
# Load a text generation model, with a slow GPT-2 tokenizer as fallback
def load_model(model_name):
    try:
        # Try loading the fast tokenizer first
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    except Exception as e:
        print(f"Fast tokenizer not available for {model_name}. Falling back to the slow GPT-2 tokenizer. Error: {e}")
        # Fall back to the slow GPT-2 tokenizer
        tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # Use the EOS token for padding if no pad token is defined
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    if model.config.pad_token_id is None:
        model.config.pad_token_id = tokenizer.pad_token_id
    return tokenizer, model
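# Example usage (model name is illustrative):
#   tokenizer, model = load_model("gpt2-medium")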
# Load Hugging Face token
hf_token = os.getenv('HF_API_TOKEN')
if not hf_token:
    raise ValueError("Hugging Face token not found. Set the HF_API_TOKEN environment variable.")
# Login to Hugging Face Hub
login(hf_token)
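# (Authenticating lets the app download private or gated models from the Hub.)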
# Function to compare text generation from both models
def compare_models(prompt, original_model_name, fine_tuned_model_name):
    # Load the original and fine-tuned models based on user input
    original_tokenizer, original_model = load_model(original_model_name)
    fine_tuned_tokenizer, fine_tuned_model = load_model(fine_tuned_model_name)
    # Ensure models are in evaluation mode
    original_model.eval()
    fine_tuned_model.eval()
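    # Note: max_length counts prompt tokens plus generated tokens, and generate()
    # uses greedy decoding here since no sampling arguments are passed.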
    # Generate text with the original model
    inputs_orig = original_tokenizer(prompt, return_tensors="pt", padding=True)
    with torch.no_grad():
        generated_ids_orig = original_model.generate(
            input_ids=inputs_orig["input_ids"],
            attention_mask=inputs_orig["attention_mask"],
            max_length=100,
            pad_token_id=original_tokenizer.pad_token_id
        )
    generated_text_orig = original_tokenizer.decode(
        generated_ids_orig[0],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True  # Optional
    )
    # Generate text with the fine-tuned model
    inputs_fine = fine_tuned_tokenizer(prompt, return_tensors="pt", padding=True)
    with torch.no_grad():
        generated_ids_fine = fine_tuned_model.generate(
            input_ids=inputs_fine["input_ids"],
            attention_mask=inputs_fine["attention_mask"],
            max_length=100,
            pad_token_id=fine_tuned_tokenizer.pad_token_id
        )
    generated_text_fine = fine_tuned_tokenizer.decode(
        generated_ids_fine[0],
        skip_special_tokens=True,
        clean_up_tokenization_spaces=True  # Optional
    )
    # Return the generated text from both models for comparison
    result = {
        "Original Model Output": generated_text_orig,
        "Fine-Tuned Model Output": generated_text_fine
    }
    return result
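# Example call (model names are illustrative, matching the placeholders below):
#   compare_models("Once upon a time", "gpt2-medium", "your-username/gpt2-medium-finetuned")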
# Gradio Interface
iface = gr.Interface(
    fn=compare_models,
    inputs=[
        gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
        gr.Textbox(lines=1, placeholder="e.g., gpt2-medium", label="Original Model Name"),
        gr.Textbox(lines=1, placeholder="e.g., your-username/gpt2-medium-finetuned", label="Fine-Tuned Model Name")
    ],
    outputs=gr.JSON(label="Generated Texts"),
    title="Compare Text Generation from Original and Fine-Tuned Models",
    description="Enter a prompt and model names to generate text from the original and fine-tuned models."
)
iface.launch()
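# launch() starts the Gradio server; when running locally, pass share=True for a temporary public link.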