Upload 13 files

Changed files:
- model_index.json (+20 -11)
- pipeline.py (+31 -23)
model_index.json
CHANGED
```diff
@@ -1,15 +1,24 @@
 {
   "_class_name": "SuperDiffPipeline",
   "_diffusers_version": "0.31.0",
-  … (eleven removed lines; the previous component entries are truncated in this view, only fragments such as "tokenizer": survive)
+  "scheduler": [
+    "diffusers",
+    "EulerDiscreteScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
 }
```
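For context: `model_index.json` is how `diffusers` records which class to instantiate for each pipeline component; every entry maps a component name to a `[library, class]` pair, loaded from the repo subfolder of the same name. A minimal loading sketch, assuming a placeholder repo id `user/superdiff` (the actual repo id is not shown on this page):

```python
from diffusers import DiffusionPipeline

# Hypothetical repo id for illustration only. SuperDiffPipeline is custom
# code living in the repo's pipeline.py, so it must be loaded as a custom
# pipeline with remote code explicitly trusted.
pipe = DiffusionPipeline.from_pretrained(
    "user/superdiff",                   # assumption: placeholder repo id
    custom_pipeline="user/superdiff",   # resolve pipeline.py from the repo
    trust_remote_code=True,
)
```

Each component listed above (scheduler, text_encoder, tokenizer, unet, vae) is then instantiated from its subfolder and passed into `SuperDiffPipeline.__init__`.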
pipeline.py
CHANGED
```diff
@@ -1,4 +1,5 @@
 import random
+from tqdm import tqdm
 from typing import Callable, Dict, List, Optional
 
 import torch
```
```diff
@@ -33,31 +34,38 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
 
         """
         super().__init__()
-        … (five removed lines, truncated in this view: blank lines and three `self.…` assignments)
+        # Register additional parameters for flexibility
+        # Explicitly assign required components
+        #self.unet = unet
+        #self.vae = vae
+        #self.text_encoder = text_encoder
+        #self.tokenizer = tokenizer
+        #self.scheduler = scheduler
 
         device = "cuda" if torch.cuda.is_available() else "cpu"
 
-        … (seventeen removed lines, not recoverable from this view)
+        vae.to(device)
+        unet.to(device)
+        text_encoder.to(device)
+        self.register_modules(unet=unet,
+                              scheduler=scheduler,
+                              vae=vae,
+                              text_encoder=text_encoder,
+                              tokenizer=tokenizer,)
+
+        #self.register_to_config(
+        #    vae=vae.__class__.__name__,
+        #    scheduler=scheduler.__class__.__name__,
+        #    tokenizer=tokenizer.__class__.__name__,
+        #    unet=unet.__class__.__name__,
+        #    text_encoder=text_encoder.__class__.__name__,
+        #    device=device,
+        #    batch_size=None,
+        #    num_inference_steps=None,
+        #    guidance_scale=None,
+        #    lift=None,
+        #    seed=None,
+        #)
 
     @torch.no_grad
     def get_batch(self, latents: Callable, nrow: int, ncol: int) -> Callable:
```
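The switch to `register_modules` matters here: it both sets each component as an attribute on the pipeline and records its `(library, class)` pair in the pipeline config, which is what `save_pretrained` serializes back into the `model_index.json` shown above. A simplified sketch of the behavior being relied on (not the actual `diffusers` implementation):

```python
# Simplified sketch of what DiffusionPipeline.register_modules provides,
# for illustration only -- the real implementation lives in diffusers.
def register_modules_sketch(pipeline, **modules):
    for name, module in modules.items():
        # Record e.g. ("diffusers", "UNet2DConditionModel") so that
        # save_pretrained can regenerate the model_index.json entry.
        library = module.__module__.split(".")[0]
        pipeline.register_to_config(**{name: (library, module.__class__.__name__)})
        # Also expose the component as an attribute: pipeline.unet, ...
        setattr(pipeline, name, module)
```

This is why the plain `self.unet = unet` assignments and the manual `register_to_config` call could be left commented out: `register_modules` covers both jobs.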
```diff
@@ -241,7 +249,7 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
             (self.num_inference_steps + 1, self.batch_size), device=self.device
         )
         with torch.no_grad():
-            for i, t in enumerate(self.scheduler.timesteps):
+            for i, t in tqdm(enumerate(self.scheduler.timesteps)):
                 dsigma = self.scheduler.sigmas[i +
                                                1] - self.scheduler.sigmas[i]
                 sigma = self.scheduler.sigmas[i]
```
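One small caveat with the new progress bar: wrapping `enumerate(...)` directly gives `tqdm` no length to work with, since `enumerate` objects define no `__len__`, so the bar shows a running count but no total or percentage. A possible tweak, if a full bar is wanted:

```python
# enumerate() has no __len__, so pass the total explicitly (optional tweak):
for i, t in tqdm(enumerate(self.scheduler.timesteps),
                 total=len(self.scheduler.timesteps)):
    ...
```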