Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+"""
 import torch
 import tensorflow as tf
 import flax
@@ -25,3 +26,22 @@ iface = gr.Interface(fn=predict_sentiment, inputs="text", outputs = ["label","nu
 if __name__ == "__main__":
     iface.launch()
 
+"""
+
+
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+import torch
+
+torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+
+tokenizer = AutoTokenizer.from_pretrained("gpt2")
+
+model = AutoModelForCausalLM.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id).to(torch_device)
+
+model_inputs = tokenizer('An explanation of Linear Regression: ', return_tensors='pt').to(torch_device)
+
+output = model.generate(**model_inputs, max_new_tokens=50, do_sample=True, top_p=0.92, top_k=0, temperature=0.6)
+
+print(tokenizer.decode(output[0], skip_special_tokens=True))
+
+
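Note: the two hunks work together. The first opens a module-level triple-quoted string and the second closes it, so the entire previous Gradio sentiment-analysis app becomes an unused string literal rather than being deleted. Only the truncated iface = gr.Interface(...) line is visible in the hunk header, so the reconstruction below is a sketch under assumptions: the predict_sentiment body is hypothetical, and the truncated outputs list is guessed to end in a label plus a numeric score.

import gradio as gr
from transformers import pipeline

# Hypothetical stand-in for the original predict_sentiment; the real
# implementation is not shown anywhere in this diff.
sentiment = pipeline("sentiment-analysis")

def predict_sentiment(text):
    result = sentiment(text)[0]  # e.g. {"label": "POSITIVE", "score": 0.99}
    return result["label"], result["score"]

# The hunk header truncates the outputs list at ["label","nu...; a label
# component plus a number component is assumed here.
iface = gr.Interface(fn=predict_sentiment, inputs="text", outputs=["label", "number"])

if __name__ == "__main__":
    iface.launch()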
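The replacement script turns the app from sentiment classification into plain GPT-2 text generation: it loads the gpt2 checkpoint, tokenizes a fixed prompt, and samples up to 50 new tokens with nucleus sampling (top_p=0.92, with top_k=0 disabling top-k filtering) at temperature 0.6. The pipeline import is never actually used; the same generation can be expressed through transformers' high-level text-generation pipeline, which forwards sampling keyword arguments to generate(). A minimal sketch, assuming the same prompt and sampling settings as the diff:

from transformers import pipeline

# High-level equivalent of the tokenizer/model/generate sequence above;
# generation kwargs are forwarded to model.generate().
generator = pipeline("text-generation", model="gpt2")

result = generator(
    "An explanation of Linear Regression: ",
    max_new_tokens=50,
    do_sample=True,
    top_p=0.92,
    top_k=0,
    temperature=0.6,
)

print(result[0]["generated_text"])

The trade-off is the usual one: the explicit AutoTokenizer/AutoModelForCausalLM version in the diff gives direct control over device placement and the pad_token_id workaround, while the pipeline version is shorter and handles tokenization and decoding internally.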