Update app.py
app.py CHANGED
@@ -4,7 +4,7 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
 import torch
 
 
-def
+def vipe_generate(text, model, tokenizer,device,do_sample,top_k=100, epsilon_cutoff=.00005, temperature=1):
     #mark the text with special tokens
     text=[tokenizer.eos_token + i + tokenizer.eos_token for i in text]
     batch=tokenizer(text, padding=True, return_tensors="pt")
@@ -28,8 +28,8 @@ tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
 tokenizer.pad_token = tokenizer.eos_token
 
 
-def
-    result =
+def vipe_generate(text):
+    result = generate(text,model,tokenizer,do_sample=True,device=device)
     return result
 
 examples = [
@@ -39,9 +39,9 @@ examples = [
 ]
 
 demo = gr.Interface(
-    fn=
-    inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
-    outputs=gr.outputs.Textbox(label="Generated
+    fn=vipe_generate,
+    inputs=gr.inputs.Textbox(lines=5, label="Arbitrary Input Text"),
+    outputs=gr.outputs.Textbox(label="Generated Prompt for Visualizations"),
     examples=examples
 )
 
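Only the changed lines are visible in the commit above. As a rough sketch of how they plausibly fit into a complete app.py: the device handling, model checkpoint, the body of the generation function (including the model.generate call and max_new_tokens), the example list, and the launch call are assumptions not shown in the diff. The helper is named generate here so that the call inside vipe_generate resolves, and gr.Textbox stands in for the older gr.inputs/gr.outputs API used in the diff; epsilon_cutoff requires transformers 4.27 or newer.

import torch
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Device and model checkpoint are assumptions; the diff context only shows the tokenizer setup.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = GPT2LMHeadModel.from_pretrained('gpt2-medium').to(device)
tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
tokenizer.pad_token = tokenizer.eos_token

def generate(text, model, tokenizer, device, do_sample, top_k=100, epsilon_cutoff=.00005, temperature=1):
    # Mark each input with the EOS token on both sides, as in the diff.
    text = [tokenizer.eos_token + i + tokenizer.eos_token for i in text]
    batch = tokenizer(text, padding=True, return_tensors="pt").to(device)
    # The actual model.generate call is not part of the diff; this is one plausible
    # way to apply the sampling parameters exposed by the signature above.
    with torch.no_grad():
        output_ids = model.generate(
            **batch,
            max_new_tokens=50,
            do_sample=do_sample,
            top_k=top_k,
            epsilon_cutoff=epsilon_cutoff,
            temperature=temperature,
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)

def vipe_generate(text):
    # The diff passes the raw textbox string straight through; it is wrapped in a
    # list here so the whole string is treated as one prompt rather than per character.
    result = generate([text], model, tokenizer, do_sample=True, device=device)
    return result[0]

examples = ["an example input sentence"]  # placeholder; the real examples are not shown in the diff

demo = gr.Interface(
    fn=vipe_generate,
    inputs=gr.Textbox(lines=5, label="Arbitrary Input Text"),
    outputs=gr.Textbox(label="Generated Prompt for Visualizations"),
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()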