Avijit Ghosh committed · Commit 85b09dd · Parent(s): ad93a8b
Add SD2
app.py CHANGED
@@ -1,6 +1,13 @@
 import gradio as gr
 import torch
-from diffusers import
+from diffusers import (
+    DiffusionPipeline,
+    StableDiffusionPipeline,
+    StableDiffusionXLPipeline,
+    EulerDiscreteScheduler,
+    UNet2DConditionModel,
+    StableDiffusion3Pipeline
+)
 from transformers import BlipProcessor, BlipForConditionalGeneration
 from pathlib import Path
 from safetensors.torch import load_file
@@ -54,7 +61,14 @@ def load_model(model_name):
     elif model_name == "stabilityai/stable-diffusion-3-medium-diffusers":
         pipeline = StableDiffusion3Pipeline.from_pretrained(
             model_name,
-            torch_dtype=torch.float16
+            torch_dtype=torch.float16
+        ).to("cuda")
+    elif model_name == "stabilityai/stable-diffusion-2":
+        scheduler = EulerDiscreteScheduler.from_pretrained(model_name, subfolder="scheduler")
+        pipeline = StableDiffusionPipeline.from_pretrained(
+            model_name,
+            scheduler=scheduler,
+            torch_dtype=torch.float16
         ).to("cuda")
     else:
         raise ValueError("Unknown model name")
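Note: the new stable-diffusion-2 branch follows the loading recipe from the stabilityai/stable-diffusion-2 model card, instantiating the repo's own EulerDiscreteScheduler config before building the pipeline. A standalone sketch of just that branch (fp16 and CUDA assumed, as in the rest of app.py):

    import torch
    from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

    model_name = "stabilityai/stable-diffusion-2"
    # Loading the scheduler from the repo's "scheduler" subfolder picks up the
    # checkpoint's own config (notably its v-prediction setting), keeping the
    # sampler consistent with how SD2 was trained.
    scheduler = EulerDiscreteScheduler.from_pretrained(model_name, subfolder="scheduler")
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        scheduler=scheduler,
        torch_dtype=torch.float16,
    ).to("cuda")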
@@ -77,6 +91,8 @@ def getimgen(prompt, model_name):
         return pipeline_text2image(prompt=prompt, negative_prompt=neg_prompt).images[0]
     elif model_name == "stabilityai/stable-diffusion-3-medium-diffusers":
         return pipeline_text2image(prompt=prompt, negative_prompt="", num_inference_steps=28, guidance_scale=7.0).images[0]
+    elif model_name == "stabilityai/stable-diffusion-2":
+        return pipeline_text2image(prompt=prompt).images[0]
 
 blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large", torch_dtype=torch.float16).to("cuda")
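Note: unlike the SD3 branch above it, the stable-diffusion-2 branch passes only the prompt, so generation runs on StableDiffusionPipeline's defaults (50 inference steps and a guidance scale of 7.5 in current diffusers). Written out explicitly, the call is roughly equivalent to:

    image = pipeline_text2image(
        prompt=prompt,
        num_inference_steps=50,  # StableDiffusionPipeline default
        guidance_scale=7.5,      # StableDiffusionPipeline default
    ).images[0]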
@@ -167,7 +183,8 @@ This demo provides an insightful look into how current text-to-image models hand
     choices=[
         "stabilityai/stable-diffusion-3-medium-diffusers",
         "stabilityai/sdxl-turbo",
-        "ByteDance/SDXL-Lightning",
+        "ByteDance/SDXL-Lightning",
+        "stabilityai/stable-diffusion-2",
         "runwayml/stable-diffusion-v1-5",
         "segmind/SSD-1B"
     ],
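Note: this choices list is the demo's model selector; the surrounding component is not shown in the diff, but in a Gradio app a list like this would typically feed a dropdown whose value is handed to load_model/getimgen. A hypothetical sketch (component and label names are assumptions, not from this commit):

    # Hypothetical wiring sketch; variable and label names are assumed.
    model_dropdown = gr.Dropdown(
        choices=[
            "stabilityai/stable-diffusion-3-medium-diffusers",
            "stabilityai/sdxl-turbo",
            "ByteDance/SDXL-Lightning",
            "stabilityai/stable-diffusion-2",
            "runwayml/stable-diffusion-v1-5",
            "segmind/SSD-1B",
        ],
        value="stabilityai/stable-diffusion-2",
        label="Model",
    )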