Spaces:
Runtime error
Anonymous941 committed on
Commit • 61f9504
1 Parent(s): bfbf5ad
Update app.py and pipeline.py
- app.py +3 -3
- pipeline.py +0 -39
app.py
CHANGED
@@ -1,9 +1,6 @@
 import gradio as gr
 from datasets import load_dataset, Image
 
-dataset = load_dataset("botmaster/mother-2-battle-sprites", split="train")
-gr.Interface.load("models/templates/text-to-image").launch()
-
 import torch
 import nltk
 import io
@@ -43,3 +40,6 @@ class PreTrainedPipeline():
         img = output[0]
         img = (img + 1) / 2.0
         img = transforms.ToPILImage()(img)
+
+dataset = load_dataset("botmaster/mother-2-battle-sprites", split="train")
+gr.Interface.load("models/templates/text-to-image").launch()
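This change moves the two module-level demo calls (the dataset load and gr.Interface.load("models/templates/text-to-image").launch()) from just below the imports to the end of app.py, so they only run after the PreTrainedPipeline class that the second hunk's context shows is still defined in this file. For comparison, here is a minimal sketch (not part of the commit) that wires that in-file pipeline directly into a Gradio demo instead of loading the Hub template; the generate helper and the "biggan-deep-256" checkpoint name are illustrative assumptions, and it assumes __call__ is amended to return the PIL image it builds.

```python
# Hypothetical alternative to gr.Interface.load(...); not part of this commit.
# Assumes PreTrainedPipeline (defined earlier in app.py) ends __call__ with
# `return img`, and that "biggan-deep-256" is a valid pytorch_pretrained_biggan
# checkpoint name.
import gradio as gr

pipeline = PreTrainedPipeline(path="biggan-deep-256")

def generate(prompt: str):
    # Returns a PIL.Image; the pipeline raises ValueError if the prompt
    # does not name an ImageNet class.
    return pipeline(prompt)

gr.Interface(fn=generate, inputs="text", outputs="image").launch()
```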
pipeline.py
CHANGED
@@ -1,39 +0,0 @@
-import torch
-import nltk
-import io
-import base64
-import shutil
-from torchvision import transforms
-
-from pytorch_pretrained_biggan import BigGAN, one_hot_from_names, truncated_noise_sample
-
-class PreTrainedPipeline():
-    def __init__(self, path=""):
-        """
-        Initialize model
-        """
-        nltk.download('wordnet')
-        self.model = BigGAN.from_pretrained(path)
-        self.truncation = 0.1
-
-    def __call__(self, inputs: str):
-        """
-        Args:
-            inputs (:obj:`str`):
-                a string containing some text
-        Return:
-            A :obj:`PIL.Image` with the raw image representation as PIL.
-        """
-        class_vector = one_hot_from_names([inputs], batch_size=1)
-        if type(class_vector) == type(None):
-            raise ValueError("Input is not in ImageNet")
-        noise_vector = truncated_noise_sample(truncation=self.truncation, batch_size=1)
-        noise_vector = torch.from_numpy(noise_vector)
-        class_vector = torch.from_numpy(class_vector)
-        with torch.no_grad():
-            output = self.model(noise_vector, class_vector, self.truncation)
-
-        # Scale image
-        img = output[0]
-        img = (img + 1) / 2.0
-        img = transforms.ToPILImage()(img)
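The deleted pipeline.py wrapped pytorch_pretrained_biggan's BigGAN in a PreTrainedPipeline class: the input string is mapped to an ImageNet one-hot vector with one_hot_from_names (which needs the NLTK WordNet corpus downloaded in __init__), paired with a truncated noise sample, passed through the generator under torch.no_grad(), and the [-1, 1] output is rescaled to [0, 1] and converted with transforms.ToPILImage(). Note that, as committed, __call__ builds that image but has no return statement. The sketch below is a hypothetical usage example, assuming a trailing return img is added and that "biggan-deep-256" (an illustrative name) is one of the checkpoints BigGAN.from_pretrained accepts.

```python
# Hypothetical usage of the removed PreTrainedPipeline; not part of the commit.
# Assumes pipeline.py is still present, that __call__ ends with `return img`,
# and that "biggan-deep-256" is a valid checkpoint for BigGAN.from_pretrained.
from pipeline import PreTrainedPipeline

pipe = PreTrainedPipeline(path="biggan-deep-256")  # downloads BigGAN weights and WordNet
image = pipe("golden retriever")                   # prompt must name an ImageNet class
image.save("golden_retriever.png")                 # __call__ is documented to return a PIL.Image
```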