Update pipeline.py
pipeline.py  CHANGED  (+0 -22)
@@ -34,13 +34,6 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
 
         """
         super().__init__()
-        # Register additional parameters for flexibility
-        # Explicitly assign required components
-        #self.unet = unet
-        #self.vae = vae
-        #self.text_encoder = text_encoder
-        #self.tokenizer = tokenizer
-        #self.scheduler = scheduler
 
         device = "cuda" if torch.cuda.is_available() else "cpu"
 
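Note (reviewer context, not part of the diff): in diffusers, DiffusionPipeline.register_modules both records each component in the pipeline config and assigns it as an attribute, which is presumably why the commented-out explicit assignments removed above were redundant. A minimal sketch of that pattern, assuming standard diffusers behaviour and hypothetical component arguments:

from diffusers import DiffusionPipeline

class TinyPipeline(DiffusionPipeline):
    # Sketch only, not the SuperDiffPipeline code.
    def __init__(self, unet, vae, text_encoder, tokenizer, scheduler):
        super().__init__()
        # register_modules sets self.unet, self.vae, self.text_encoder,
        # self.tokenizer and self.scheduler, so no manual assignment is needed.
        self.register_modules(
            unet=unet,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
        )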
@@ -53,21 +46,6 @@ class SuperDiffPipeline(DiffusionPipeline, ConfigMixin):
             text_encoder=text_encoder,
             tokenizer=tokenizer,)
 
-
-        #self.register_to_config(
-        # vae=vae.__class__.__name__,
-        # scheduler=scheduler.__class__.__name__,
-        # tokenizer=tokenizer.__class__.__name__,
-        # unet=unet.__class__.__name__,
-        # text_encoder=text_encoder.__class__.__name__,
-        # device=device,
-        # batch_size=None,
-        # num_inference_steps=None,
-        # guidance_scale=None,
-        # lift=None,
-        # seed=None,
-        #)
-
     @torch.no_grad
     def get_batch(self, latents: Callable, nrow: int, ncol: int) -> Callable:
         """get_batch.
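Note (reviewer context, again an assumption rather than something stated in the commit): register_to_config comes from ConfigMixin and is meant for plain, serializable values; the component class names are already captured by register_modules, so the commented-out block removed above duplicated that bookkeeping. If the scalar defaults (batch_size, num_inference_steps, guidance_scale, lift, seed) were ever wanted in the config, one hedged way to record them could look like this:

# Hypothetical sketch, not part of this commit.
from diffusers import DiffusionPipeline

class TinyPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, batch_size=None, num_inference_steps=None,
                 guidance_scale=None, lift=None, seed=None):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
        # Only plain values go through register_to_config; the module objects
        # themselves are handled by register_modules above.
        self.register_to_config(
            batch_size=batch_size,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            lift=lift,
            seed=seed,
        )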
|