prithivMLmods committed on
Commit
1013a67
1 Parent(s): 90911cd

Update app.py

Files changed (1)
  1. app.py +0 -227
app.py CHANGED
@@ -1,227 +0,0 @@
- import os
- import random
- import uuid
-
- import gradio as gr
- import numpy as np
- from PIL import Image
- import spaces
- import torch
- from diffusers import StableDiffusion3Pipeline, DPMSolverMultistepScheduler, AutoencoderKL
- from huggingface_hub import snapshot_download
-
- huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
-
- model_path = snapshot_download(
-     repo_id="stabilityai/stable-diffusion-3-medium",
-     revision="refs/pr/26",
-     repo_type="model",
-     ignore_patterns=["*.md", "*.gitattributes"],
-     local_dir="stable-diffusion-3-medium",
-     token=huggingface_token,  # enter a new token ID.
- )
-
- DESCRIPTION = """# Stable Diffusion 3"""
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
-
- MAX_SEED = np.iinfo(np.int32).max
- CACHE_EXAMPLES = False
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
- USE_TORCH_COMPILE = False
- ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
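- # Load the SD3 pipeline weights in half precision; the model stays on CPU until generate() moves it to the GPU.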
- pipe = StableDiffusion3Pipeline.from_pretrained(model_path, torch_dtype=torch.float16)
-
-
- def save_image(img):
-     unique_name = str(uuid.uuid4()) + ".png"
-     img.save(unique_name)
-     return unique_name
-
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
-
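- # On Hugging Face Spaces, spaces.GPU allocates a ZeroGPU device for each call, here for up to 30 seconds.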
- @spaces.GPU(duration=30, enable_queue=True)
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     use_negative_prompt: bool = False,
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale: float = 7,
-     randomize_seed: bool = False,
-     num_inference_steps=30,
-     NUM_IMAGES_PER_PROMPT=1,
-     use_resolution_binning: bool = True,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     pipe.to(device)
-     seed = int(randomize_seed_fn(seed, randomize_seed))
-     generator = torch.Generator().manual_seed(seed)
-
-     # pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-
-     if not use_negative_prompt:
-         negative_prompt = None  # type: ignore
-
-     output = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         generator=generator,
-         num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
-         output_type="pil",
-     ).images
-
-     return output
-
-
- examples = [
-     "neon holography crystal cat",
-     "a cat eating a piece of cheese",
-     "an astronaut riding a horse in space",
-     "a cartoon of a boy playing with a tiger",
-     "a cute robot artist painting on an easel, concept art",
-     "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the shoulder pose, brown skin-tone"
- ]
-
- css = '''
- .gradio-container{max-width: 1000px !important}
- h1{text-align:center}
- '''
- with gr.Blocks(css=css) as demo:
-     with gr.Row():
-         with gr.Column():
-             gr.HTML(
-                 """
-                 <h1 style='text-align: center'>
-                 Stable Diffusion 3
-                 </h1>
-                 """
-             )
-             gr.HTML(
-                 """
-                 <h3 style='text-align: center'>
-                 Follow me for more!
-                 <a href='https://twitter.com/sot_data' target='_blank'>Twitter</a> | <a href='https://github.com/sourceoftruthdata' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/danielwcovarrubias/' target='_blank'>Linkedin</a>
-                 </h3>
-                 """
-             )
-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(label="Result", elem_id="gallery", show_label=False)
-     with gr.Accordion("Advanced options", open=False):
-         with gr.Row():
-             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 value="deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, NSFW",
-                 visible=True,
-             )
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-         )
-
-         steps = gr.Slider(
-             label="Steps",
-             minimum=0,
-             maximum=60,
-             step=1,
-             value=25,
-         )
-         number_image = gr.Slider(
-             label="Number of Images",
-             minimum=1,
-             maximum=4,
-             step=1,
-             value=1,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row(visible=True):
-             width = gr.Slider(
-                 label="Width",
-                 minimum=256,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=32,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=256,
-                 maximum=MAX_IMAGE_SIZE,
-                 step=32,
-                 value=1024,
-             )
-         with gr.Row():
-             guidance_scale = gr.Slider(
-                 label="Guidance Scale",
-                 minimum=0.1,
-                 maximum=10,
-                 step=0.1,
-                 value=7.0,
-             )
-
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         outputs=[result],
-         fn=generate,
-         cache_examples=CACHE_EXAMPLES,
-     )
-
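-     # Toggling the checkbox shows or hides the negative-prompt textbox.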
-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         api_name=False,
-     )
-
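-     # Submitting either textbox or clicking Run calls generate() and exposes it as the "run" API endpoint.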
-     gr.on(
-         triggers=[
-             prompt.submit,
-             negative_prompt.submit,
-             run_button.click,
-         ],
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             use_negative_prompt,
-             seed,
-             width,
-             height,
-             guidance_scale,
-             randomize_seed,
-             steps,
-             number_image,
-         ],
-         outputs=[result],
-         api_name="run",
-     )
-
- if __name__ == "__main__":
-     demo.queue().launch()