mskrt committed
Commit: e4e13db (verified)
Parent(s): 3e9f243

Upload 13 files

Files changed (2):
  1. model_index.json +20 -11
  2. pipeline.py +31 -23
model_index.json CHANGED
@@ -1,15 +1,24 @@
 {
   "_class_name": "SuperDiffPipeline",
   "_diffusers_version": "0.31.0",
-  "batch_size": null,
-  "device": "cuda",
-  "guidance_scale": null,
-  "lift": null,
-  "num_inference_steps": null,
-  "scheduler": "EulerDiscreteScheduler",
-  "seed": null,
-  "text_encoder": "CLIPTextModel",
-  "tokenizer": "CLIPTokenizer",
-  "unet": "UNet2DConditionModel",
-  "vae": "AutoencoderKL"
+  "scheduler": [
+    "diffusers",
+    "EulerDiscreteScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
 }
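This is the substantive fix in the index: diffusers expects each component entry in model_index.json to be a [library, class_name] pair that from_pretrained can import and instantiate, and runtime settings such as batch_size, guidance_scale, and seed do not belong in the index at all (they are call-time arguments). With the new format the pipeline loads in the standard way; a minimal sketch, using a placeholder repo id since the hosting repo is not named in this commit:

    from diffusers import DiffusionPipeline

    # "namespace/superdiff" is a placeholder repo id; custom_pipeline points
    # diffusers at the pipeline.py shipped alongside model_index.json so the
    # SuperDiffPipeline class defined there is used.
    pipe = DiffusionPipeline.from_pretrained(
        "namespace/superdiff",
        custom_pipeline="namespace/superdiff",
    )

Each [library, class] pair resolves to an import (EulerDiscreteScheduler from diffusers, CLIPTextModel and CLIPTokenizer from transformers, and so on), with each component's weights read from the repo subfolder of the same name.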
pipeline.py CHANGED
@@ -1,4 +1,5 @@
 import random
+from tqdm import tqdm
 from typing import Callable, Dict, List, Optional
 
 import torch
@@ -33,31 +34,38 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
 
         """
         super().__init__()
-        self.unet = unet
-        self.vae = vae
-        self.text_encoder = text_encoder
-        self.tokenizer = tokenizer
-        self.scheduler = scheduler
+        # Register additional parameters for flexibility
+        # Explicitly assign required components
+        #self.unet = unet
+        #self.vae = vae
+        #self.text_encoder = text_encoder
+        #self.tokenizer = tokenizer
+        #self.scheduler = scheduler
 
         device = "cuda" if torch.cuda.is_available() else "cpu"
 
-        self.vae.to(device)
-        self.unet.to(device)
-        self.text_encoder.to(device)
-
-        self.register_to_config(
-            vae=vae.__class__.__name__,
-            scheduler=scheduler.__class__.__name__,
-            tokenizer=tokenizer.__class__.__name__,
-            unet=unet.__class__.__name__,
-            text_encoder=text_encoder.__class__.__name__,
-            device=device,
-            batch_size=None,
-            num_inference_steps=None,
-            guidance_scale=None,
-            lift=None,
-            seed=None,
-        )
+        vae.to(device)
+        unet.to(device)
+        text_encoder.to(device)
+        self.register_modules(unet=unet,
+                              scheduler=scheduler,
+                              vae=vae,
+                              text_encoder=text_encoder,
+                              tokenizer=tokenizer,)
+
+        #self.register_to_config(
+        #    vae=vae.__class__.__name__,
+        #    scheduler=scheduler.__class__.__name__,
+        #    tokenizer=tokenizer.__class__.__name__,
+        #    unet=unet.__class__.__name__,
+        #    text_encoder=text_encoder.__class__.__name__,
+        #    device=device,
+        #    batch_size=None,
+        #    num_inference_steps=None,
+        #    guidance_scale=None,
+        #    lift=None,
+        #    seed=None,
+        #)
 
     @torch.no_grad
     def get_batch(self, latents: Callable, nrow: int, ncol: int) -> Callable:
@@ -241,7 +249,7 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
             (self.num_inference_steps + 1, self.batch_size), device=self.device
         )
         with torch.no_grad():
-            for i, t in enumerate(self.scheduler.timesteps):
+            for i, t in tqdm(enumerate(self.scheduler.timesteps)):
                 dsigma = self.scheduler.sigmas[i +
                                                1] - self.scheduler.sigmas[i]
                 sigma = self.scheduler.sigmas[i]
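The pipeline.py side mirrors that fix. register_modules is the DiffusionPipeline hook that both assigns the components as attributes (self.unet, self.vae, and so on) and records their [library, class] pairs in the pipeline config, which is exactly what save_pretrained serializes into the model_index.json shown above. The hand-rolled register_to_config call it replaces stored only class-name strings, which from_pretrained cannot resolve back into objects. A rough sketch of the round trip this enables, assuming pipe is an instantiated SuperDiffPipeline and the path is a placeholder:

    # save_pretrained writes model_index.json plus one weights/config
    # subfolder per registered module ("./superdiff-ckpt" is a placeholder).
    pipe.save_pretrained("./superdiff-ckpt")

    # from_pretrained reads model_index.json, imports each [library, class]
    # pair, and rebuilds the pipeline with every component restored.
    pipe = SuperDiffPipeline.from_pretrained("./superdiff-ckpt")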
 
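One small caveat with the progress bar as committed: tqdm cannot infer a length from a bare enumerate(), so the bar shows only a running count and iteration rate, with no percentage or ETA. Passing total explicitly restores the full display; a possible refinement, not part of this commit:

    # total= lets tqdm render percentage and ETA; enumerate() has no len().
    for i, t in tqdm(enumerate(self.scheduler.timesteps),
                     total=len(self.scheduler.timesteps)):
        ...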