Update app.py
app.py CHANGED
@@ -15,6 +15,13 @@ def load_model(model_name):
     tokenizer = GPT2Tokenizer.from_pretrained(model_name)
 
     model = AutoModelForCausalLM.from_pretrained(model_name)
+
+    # Assign eos_token as pad_token if not already set
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+    if model.config.pad_token_id is None:
+        model.config.pad_token_id = tokenizer.pad_token_id
+
     return tokenizer, model
 
 # Load Hugging Face token
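GPT-2 checkpoints ship without a pad token, so any call that pads a batch fails until one is assigned; the hunk above reuses the eos token for that role and mirrors it into the model config. A minimal sketch of the behavior this fixes, assuming the public "gpt2" checkpoint (the Space may load a different model):

from transformers import GPT2Tokenizer

# Assumes the public "gpt2" checkpoint for illustration.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
print(tokenizer.pad_token)   # None: GPT-2 defines no pad token

# Padding a batch at this point raises
# ValueError: Asking to pad but the tokenizer does not have a padding token.
# tokenizer(["short", "a longer prompt"], padding=True)

# The fix from the hunk above: reuse end-of-sequence as padding.
tokenizer.pad_token = tokenizer.eos_token
batch = tokenizer(["short", "a longer prompt"], return_tensors="pt", padding=True)
print(batch["input_ids"].shape)  # shorter prompt is padded to the longer one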
@@ -36,16 +43,34 @@ def compare_models(prompt, original_model_name, fine_tuned_model_name):
     fine_tuned_model.eval()
 
     # Generate text with the original model
-    inputs_orig = original_tokenizer(prompt, return_tensors="pt")
+    inputs_orig = original_tokenizer(prompt, return_tensors="pt", padding=True)
     with torch.no_grad():
-        generated_ids_orig = original_model.generate(
-            …
+        generated_ids_orig = original_model.generate(
+            input_ids=inputs_orig["input_ids"],
+            attention_mask=inputs_orig["attention_mask"],
+            max_length=100,
+            pad_token_id=original_tokenizer.pad_token_id
+        )
+        generated_text_orig = original_tokenizer.decode(
+            generated_ids_orig[0],
+            skip_special_tokens=True,
+            clean_up_tokenization_spaces=True  # Optional
+        )
 
     # Generate text with the fine-tuned model
-    inputs_fine = fine_tuned_tokenizer(prompt, return_tensors="pt")
+    inputs_fine = fine_tuned_tokenizer(prompt, return_tensors="pt", padding=True)
     with torch.no_grad():
-        generated_ids_fine = fine_tuned_model.generate(
-            …
+        generated_ids_fine = fine_tuned_model.generate(
+            input_ids=inputs_fine["input_ids"],
+            attention_mask=inputs_fine["attention_mask"],
+            max_length=100,
+            pad_token_id=fine_tuned_tokenizer.pad_token_id
+        )
+        generated_text_fine = fine_tuned_tokenizer.decode(
+            generated_ids_fine[0],
+            skip_special_tokens=True,
+            clean_up_tokenization_spaces=True  # Optional
+        )
 
     # Return the generated text from both models for comparison
     result = {
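The removed generate() argument lines are truncated in the diff view (left as … above), but the replacement is fully visible: both branches now pass input_ids, an explicit attention_mask, and pad_token_id, which silences the "attention mask is not set" warning from transformers and keeps outputs stable once inputs are padded. The two branches reduce to the same pattern; a hedged sketch, where generate_text is a hypothetical helper rather than anything in the Space's code:

import torch

def generate_text(tokenizer, model, prompt, max_length=100):
    # Hypothetical helper mirroring the updated generation path above.
    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # explicit mask, no warning
            max_length=max_length,
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)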
@@ -59,8 +84,8 @@ iface = gr.Interface(
     fn=compare_models,
     inputs=[
         gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
-        gr.Textbox(lines=1, placeholder="…
-        gr.Textbox(lines=1, placeholder="…
+        gr.Textbox(lines=1, placeholder="e.g., gpt2-medium", label="Original Model Name"),
+        gr.Textbox(lines=1, placeholder="e.g., your-username/gpt2-medium-finetuned", label="Fine-Tuned Model Name")
     ],
     outputs=gr.JSON(label="Generated Texts"),
     title="Compare Text Generation from Original and Fine-Tuned Models",
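The two model-name fields previously had placeholders that are truncated in the diff view and no visible labels; the new version labels them and suggests a base/fine-tuned pair. A self-contained sketch of the resulting interface, with compare_models stubbed out because the rest of app.py is not shown in this commit:

import gradio as gr

def compare_models(prompt, original_model_name, fine_tuned_model_name):
    # Stub for illustration; the real function runs both models on the prompt.
    return {"original": "...", "fine_tuned": "..."}

iface = gr.Interface(
    fn=compare_models,
    inputs=[
        gr.Textbox(lines=5, placeholder="Enter text here...", label="Input Text"),
        gr.Textbox(lines=1, placeholder="e.g., gpt2-medium", label="Original Model Name"),
        gr.Textbox(lines=1, placeholder="e.g., your-username/gpt2-medium-finetuned", label="Fine-Tuned Model Name"),
    ],
    outputs=gr.JSON(label="Generated Texts"),
    title="Compare Text Generation from Original and Fine-Tuned Models",
)
iface.launch()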