Anonymous941 committed
Commit a9c17e5
1 Parent(s): b9bd985

please work

Files changed (4)
  1. config.json +62 -0
  2. pipeline.py +40 -0
  3. pytorch_model.bin +3 -0
  4. requirements.txt +3 -0
config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "attention_layer_position": 8,
+   "channel_width": 128,
+   "class_embed_dim": 128,
+   "eps": 0.0001,
+   "layers": [
+     [
+       false,
+       16,
+       16
+     ],
+     [
+       true,
+       16,
+       16
+     ],
+     [
+       false,
+       16,
+       16
+     ],
+     [
+       true,
+       16,
+       8
+     ],
+     [
+       false,
+       8,
+       8
+     ],
+     [
+       true,
+       8,
+       4
+     ],
+     [
+       false,
+       4,
+       4
+     ],
+     [
+       true,
+       4,
+       2
+     ],
+     [
+       false,
+       2,
+       2
+     ],
+     [
+       true,
+       2,
+       1
+     ]
+   ],
+   "n_stats": 51,
+   "num_classes": 1000,
+   "output_dim": 128,
+   "z_dim": 128
+ }
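
For context on how these fields are consumed: pipeline.py below loads this repository through pytorch_pretrained_biggan's BigGAN.from_pretrained, and in that library each entry of "layers" is read as [up_sample, in_multiplier, out_multiplier], with actual channel counts obtained by multiplying by "channel_width". That interpretation is an assumption about the library, not something stated in this commit; a minimal sketch for inspecting the file with only the standard library:

import json

# Minimal sketch: print the generator layout described by config.json.
# ASSUMPTION: each "layers" entry is [up_sample, in_mult, out_mult] and the
# real channel counts are mult * channel_width (pytorch_pretrained_biggan format).
with open("config.json") as f:
    cfg = json.load(f)

for up_sample, in_mult, out_mult in cfg["layers"]:
    print(up_sample, in_mult * cfg["channel_width"], out_mult * cfg["channel_width"])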
pipeline.py ADDED
@@ -0,0 +1,40 @@
+ import torch
+ import nltk
+ from torchvision import transforms
+
+ from pytorch_pretrained_biggan import BigGAN, one_hot_from_names, truncated_noise_sample
+
+
+ class PreTrainedPipeline():
+     def __init__(self, path=""):
+         """
+         Load the BigGAN generator from the repository files at `path`.
+         """
+         # WordNet is needed by one_hot_from_names to map class names to ImageNet ids
+         nltk.download('wordnet')
+         self.model = BigGAN.from_pretrained(path)
+         self.truncation = 0.1
+
+     def __call__(self, inputs: str):
+         """
+         Args:
+             inputs (:obj:`str`):
+                 an ImageNet class name, e.g. "golden retriever"
+         Return:
+             A :obj:`PIL.Image` with the generated image.
+         """
+         class_vector = one_hot_from_names([inputs], batch_size=1)
+         if class_vector is None:
+             raise ValueError("Input is not in ImageNet")
+         # Sample a truncated latent vector and run the generator
+         noise_vector = truncated_noise_sample(truncation=self.truncation, batch_size=1)
+         noise_vector = torch.from_numpy(noise_vector)
+         class_vector = torch.from_numpy(class_vector)
+         with torch.no_grad():
+             output = self.model(noise_vector, class_vector, self.truncation)
+
+         # Rescale the output from [-1, 1] to [0, 1] and convert to a PIL image
+         img = output[0]
+         img = (img + 1) / 2.0
+         img = transforms.ToPILImage()(img)
+         return img
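
A quick usage sketch for the pipeline above, assuming the four files from this commit are checked out locally (with the LFS weights pulled) and the packages in requirements.txt are installed; "golden retriever" is just an illustrative ImageNet class name:

from pipeline import PreTrainedPipeline

# Load config.json and pytorch_model.bin from the current directory
pipe = PreTrainedPipeline(path=".")
# Any class name that one_hot_from_names can resolve via WordNet/ImageNet
image = pipe("golden retriever")
image.save("golden_retriever.png")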
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b86146a8cfcde4cda135adba4b2806f78b3abe2dac9d0fd090cc3ab318604ad9
+ size 210849368
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ pytorch-pretrained-biggan
+ nltk==3.6.5
+ torchvision