prithivMLmods committed
Commit f5c917d
1 Parent(s): 8f08ef6

Update app.py

Files changed (1): app.py +29 -28
app.py CHANGED
@@ -9,17 +9,19 @@ import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 
-#html_file_url = "https://prithivhamster.vercel.app/"
-#html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
-
-DESCRIPTIONx = """## STABLE HAMSTER 🐹"""
-
 css = '''
-.gradio-container{max-width: 575px !important}
+.gradio-container{max-width: 888px !important}
 h1{text-align:center}
 footer {
     visibility: hidden
 }
+.submit-btn {
+    background-color: #6263c7 !important;
+    color: white !important;
+}
+.submit-btn:hover {
+    background-color: #6063ff !important;
+}
 '''
 
 examples = [
@@ -30,14 +32,12 @@ examples = [
     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"
 ]
 
-#echo %VAR_NAME%
-MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use SDXL Model as "MODEL_REPO" --------->>> ”VALUE”.
+MODEL_ID = os.getenv("MODEL_VAL_PATH")
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
-BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
+BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 
-#Load model outside of function
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     MODEL_ID,
@@ -47,11 +47,9 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 ).to(device)
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-# <compile speedup >
 if USE_TORCH_COMPILE:
     pipe.compile()
 
-# Offloading capacity (RAM)
 if ENABLE_CPU_OFFLOAD:
     pipe.enable_model_cpu_offload()
 
@@ -85,7 +83,6 @@ def generate(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    #Options
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -97,11 +94,9 @@
         "output_type": "pil",
     }
 
-    #VRAM usage Lesser
     if use_resolution_binning:
         options["use_resolution_binning"] = True
 
-    #Images potential batches
     images = []
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
@@ -112,21 +107,24 @@
 
     image_paths = [save_image(img) for img in images]
     return image_paths, seed
-#Main gr.Block
+
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-    gr.Markdown(DESCRIPTIONx)
     with gr.Row():
-        prompt = gr.Text(
-            label="Prompt",
-            show_label=False,
-            max_lines=1,
-            placeholder="Enter your prompt",
-            container=False,
-        )
-        run_button = gr.Button("Run", scale=0)
-    result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
+        with gr.Column(scale=1):
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt",
+                container=False,
+            )
+            run_button = gr.Button(
+                "Generate as ( 1024 x 1024 )🤗",
+                scale=0,
+                elem_classes="submit-btn"
+            )
 
-    with gr.Accordion("Advanced options", open=False):
+            with gr.Accordion("Advanced options", open=True):
         num_images = gr.Slider(
             label="Number of Images",
             minimum=1,
@@ -183,6 +181,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             value=23,
         )
 
+        with gr.Column(scale=2):
+            result = gr.Gallery(label="Result", columns=1, show_label=False)
+
     gr.Examples(
         examples=examples,
         inputs=prompt,
@@ -216,6 +217,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         outputs=[result, seed],
         api_name="run",
     )
-    #gr.HTML(html_content)
+
 if __name__ == "__main__":
     demo.queue(max_size=40).launch()
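The `generate` hunk at `@@ -85,7 +83,6 @@` calls `randomize_seed_fn`, which this diff never shows. Below is a minimal sketch of that helper as it is conventionally written in Spaces of this kind; the constant name and bounds are assumptions, not code from this commit:

```python
import random

import numpy as np

# Assumed convention: seeds span the non-negative int32 range.
MAX_SEED = np.iinfo(np.int32).max

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Swap in a random seed when the user asks for one, and return the
    # seed actually used so the UI can display it alongside the images.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```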
 
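The diff context truncates the batching loop in `generate` right after `batch_options = options.copy()`. A sketch of how such a loop typically completes, assuming the list-valued `prompt` and `negative_prompt` entries built above are sliced per batch; the function name and slicing details are illustrative, not the commit's verbatim code:

```python
from typing import Any, Dict, List

def generate_in_batches(pipe, options: Dict[str, Any],
                        num_images: int, batch_size: int) -> List:
    """Run the pipeline in chunks of batch_size and collect PIL images."""
    images = []
    for i in range(0, num_images, batch_size):
        batch_options = options.copy()
        # Trim the per-image lists down to the current batch.
        batch_options["prompt"] = options["prompt"][i:i + batch_size]
        if options.get("negative_prompt") is not None:
            batch_options["negative_prompt"] = options["negative_prompt"][i:i + batch_size]
        # One pipeline call per batch; .images holds the generated PIL images.
        images.extend(pipe(**batch_options).images)
    return images
```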
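The new `.submit-btn` rules in the CSS hunk only take effect because the run button opts into the class through `elem_classes`. A self-contained sketch of that Gradio pattern; the components here are illustrative, not the full app:

```python
import gradio as gr

# Custom CSS is passed to gr.Blocks; components opt in via elem_classes.
css = '''
.submit-btn {
    background-color: #6263c7 !important;
    color: white !important;
}
.submit-btn:hover {
    background-color: #6063ff !important;
}
'''

with gr.Blocks(css=css) as demo:
    prompt = gr.Text(placeholder="Enter your prompt")
    # elem_classes attaches the CSS class to the button's DOM element.
    run_button = gr.Button("Generate", elem_classes="submit-btn")

if __name__ == "__main__":
    demo.launch()
```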