jiuface committed on
Commit 31f479a
1 Parent(s): 4af8574

Update app.py

Files changed (1)
  1. app.py +9 -59
app.py CHANGED

@@ -71,7 +71,7 @@ def download_models():
     for model, (url, folder, filename) in models.items():
         download_file(url, folder, filename)
 
-download_models()
+
 
 def timer_func(func):
     def wrapper(*args, **kwargs):
@@ -144,9 +144,6 @@ class LazyRealESRGAN:
         self.load_model()
         return self.model.predict(img)
 
-lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
-lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
-
 @timer_func
 def resize_and_upscale(input_image, resolution):
     scale = 2 if resolution <= 2048 else 4
@@ -176,8 +173,7 @@ def create_hdr_effect(original_image, hdr):
     hdr_image_8bit = np.clip(hdr_image * 255, 0, 255).astype('uint8')
     return Image.fromarray(cv2.cvtColor(hdr_image_8bit, cv2.COLOR_BGR2RGB))
 
-lazy_pipe = LazyLoadPipeline()
-lazy_pipe.load()
+
 
 def prepare_image(input_image, resolution, hdr):
     condition_image = resize_and_upscale(input_image, resolution)
@@ -453,59 +449,6 @@ class ControlNetDepthDesignModelMulti:
         return design_image
 
 
-def create_demo(model):
-    gr.Markdown("### Just try zeroGPU")
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input')
-            input_text = gr.Textbox(label='Prompt', placeholder='Please upload your image first', lines=2)
-            with gr.Accordion('Advanced options', open=False):
-
-                num_steps = gr.Slider(label='Steps',
-                                      minimum=1,
-                                      maximum=50,
-                                      value=50,
-                                      step=1)
-                img_size = gr.Slider(label='Image size',
-                                     minimum=256,
-                                     maximum=768,
-                                     value=768,
-                                     step=64)
-                guidance_scale = gr.Slider(label='Guidance Scale',
-                                           minimum=0.1,
-                                           maximum=30.0,
-                                           value=10.0,
-                                           step=0.1)
-                seed = gr.Slider(label='Seed',
-                                 minimum=-1,
-                                 maximum=2147483647,
-                                 value=323*111,
-                                 step=1,
-                                 randomize=True)
-                strength = gr.Slider(label='Strength',
-                                     minimum=0.1,
-                                     maximum=1.0,
-                                     value=0.9,
-                                     step=0.1)
-                a_prompt = gr.Textbox(
-                    label='Added Prompt',
-                    value="interior design, 4K, high resolution, photorealistic")
-                n_prompt = gr.Textbox(
-                    label='Negative Prompt',
-                    value="window, door, low resolution, banner, logo, watermark, text, deformed, blurry, out of focus, surreal, ugly, beginner")
-
-                resolution = gr.Slider(minimum=256, maximum=2048, value=512, step=256, label="Resolution")
-                num_inference_steps = gr.Slider(minimum=1, maximum=50, value=20, step=1, label="Number of Inference Steps")
-                strength = gr.Slider(minimum=0, maximum=1, value=0.4, step=0.01, label="Strength")
-                hdr = gr.Slider(minimum=0, maximum=1, value=0, step=0.1, label="HDR Effect")
-                guidance_scale = gr.Slider(minimum=0, maximum=20, value=3, step=0.5, label="Guidance Scale")
-
-            submit = gr.Button("Submit")
-
-        with gr.Column():
-            design_image = gr.Image(label="Output Mask", elem_id='img-display-output')
-
-
 def on_submit(image, text, num_steps, guidance_scale, seed, strength, a_prompt, n_prompt, img_size):
     model.seed = seed
     model.neg_prompt = n_prompt
@@ -548,6 +491,13 @@ seg_image_processor, image_segmentor = get_segmentation_pipeline()
 depth_feature_extractor, depth_estimator = get_depth_pipeline()
 depth_estimator = depth_estimator.to(device)
 
+download_models()
+lazy_realesrgan_x2 = LazyRealESRGAN(device, scale=2)
+lazy_realesrgan_x4 = LazyRealESRGAN(device, scale=4)
+
+lazy_pipe = LazyLoadPipeline()
+lazy_pipe.load()
+
 
 
 def main():
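For context, the wrappers this commit moves (LazyRealESRGAN, LazyLoadPipeline) follow a lazy-initialization pattern: the wrapper object is cheap to construct, and the heavy model is only built on first use (self.load_model() inside predict, as shown in the hunk at line 144). The sketch below is a minimal, hypothetical illustration of that pattern only; LazyModel and its loader argument are made-up names, not code from app.py.

```python
from typing import Callable, Generic, Optional, TypeVar

T = TypeVar("T")


class LazyModel(Generic[T]):
    """Hypothetical stand-in for the LazyRealESRGAN / LazyLoadPipeline wrappers."""

    def __init__(self, loader: Callable[[], T]):
        self._loader = loader            # callable that builds the heavy model
        self._model: Optional[T] = None  # nothing is loaded at construction time

    def load(self) -> T:
        # Build the underlying model once, on first request.
        if self._model is None:
            self._model = self._loader()
        return self._model

    def predict(self, x):
        # Mirrors the diff's shape: load on demand, then delegate to the model.
        return self.load()(x)


if __name__ == "__main__":
    # Placeholder loader; the real app would construct a RealESRGAN model or a
    # diffusers pipeline here instead of a lambda.
    lazy = LazyModel(lambda: (lambda img: f"upscaled({img})"))
    print(lazy.predict("photo.png"))  # first call triggers the one-time load
```

Because constructing such a wrapper has no side effects, the module-level instantiations can sit anywhere in app.py; this commit places them, together with download_models() and lazy_pipe.load(), after the segmentation and depth pipelines near the bottom of the file.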