John6666 committed
Commit 5da9aec
1 Parent(s): b435ce2

Upload 2 files

Files changed (2):
1. app.py +189 -89
2. mod.py +7 -6
app.py CHANGED
@@ -2,10 +2,10 @@ import spaces
 import gradio as gr
 import json
 import torch
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image, AutoPipelineForInpainting
 from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 from diffusers.utils import load_image
-from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel, FluxControlNetImg2ImgPipeline, FluxTransformer2DModel
+from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiControlNetModel, FluxControlNetImg2ImgPipeline, FluxTransformer2DModel, FluxControlNetInpaintPipeline, FluxInpaintPipeline
 from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download, HfApi
 import os
 import copy
@@ -47,6 +47,8 @@ good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtyp
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1, token=HF_TOKEN)
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
                                       tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+pipe_ip = AutoPipelineForInpainting.from_pretrained(base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                                    tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
 controlnet_union = None
 controlnet = None
 last_model = models[0]
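The new `pipe_ip` follows the same component-sharing pattern as `pipe_i2i`: every heavy module (transformer, text encoders, tokenizers) is handed over from the already-loaded `pipe`, so the inpainting pipeline costs almost no extra memory, and only the VAE is swapped so `good_vae` replaces the lightweight `taef1` preview VAE. For reference, diffusers' `from_pipe` offers the same sharing in one call — a minimal sketch, assuming `pipe` is the FLUX pipeline loaded above (note it would share `taef1` rather than `good_vae`, which is why the diff spells the components out):

```python
from diffusers import AutoPipelineForImage2Image, AutoPipelineForInpainting

# Reuses pipe's components in place; no additional weights are loaded.
pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe)
pipe_ip = AutoPipelineForInpainting.from_pipe(pipe)
```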
@@ -58,29 +60,36 @@ last_cn_on = False
 MAX_SEED = 2**32-1
 
 def unload_lora():
-    global pipe, pipe_i2i
+    global pipe, pipe_i2i, pipe_ip
     try:
         #pipe.unfuse_lora()
         pipe.unload_lora_weights()
         #pipe_i2i.unfuse_lora()
         pipe_i2i.unload_lora_weights()
+        pipe_ip.unload_lora_weights()
     except Exception as e:
         print(e)
 
+def download_file_mod(url, directory=os.getcwd()):
+    path = download_hf_file(directory, url, hf_token=HF_TOKEN)
+    if not path: raise Exception(f"Download error: {url}")
+    return path
+
 # https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union
 # https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union
 # https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux
 #@spaces.GPU()
 def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, model_type: str, progress=gr.Progress(track_tqdm=True)):
-    global pipe, pipe_i2i, taef1, good_vae, controlnet_union, controlnet, last_model, last_cn_on, dtype
+    global pipe, pipe_i2i, pipe_ip, taef1, good_vae, controlnet_union, controlnet, last_model, last_cn_on, dtype
     safetensors_file = None
     single_file_base_model = single_file_base_models.get(model_type, models[0])
     try:
         #if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
-        if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on) or ((not is_repo_name(repo_id) or not is_repo_exists(repo_id)) and not ".safetensors" in repo_id): return gr.update(visible=True)
+        if not disable_model_cache and (repo_id == last_model and cn_on is last_cn_on) or ((not is_repo_name(repo_id) or not is_repo_exists(repo_id)) and not ".safetensors" in repo_id): return gr.update()
        unload_lora()
        pipe.to("cpu")
        pipe_i2i.to("cpu")
+       pipe_ip.to("cpu")
        good_vae.to("cpu")
        taef1.to("cpu")
        if controlnet is not None: controlnet.to("cpu")
@@ -96,12 +105,16 @@ def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, mode
            safetensors_file = download_file_mod(repo_id)
            transformer = FluxTransformer2DModel.from_single_file(safetensors_file, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model)
            pipe = FluxControlNetPipeline.from_pretrained(single_file_base_model, transformer=transformer, controlnet=controlnet, torch_dtype=dtype, token=HF_TOKEN)
-           pipe_i2i = FluxControlNetImg2ImgPipeline.from_pretrained(single_file_base_model, controlnet=controlnet, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
-                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_i2i = FluxControlNetImg2ImgPipeline.from_pretrained(single_file_base_model, controlnet=controlnet, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_ip = FluxControlNetInpaintPipeline.from_pretrained(single_file_base_model, controlnet=controlnet, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                     tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
        else:
            pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype, token=HF_TOKEN)
-           pipe_i2i = FluxControlNetImg2ImgPipeline.from_pretrained(repo_id, controlnet=controlnet, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
-                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_i2i = FluxControlNetImg2ImgPipeline.from_pretrained(repo_id, controlnet=controlnet, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_ip = FluxControlNetInpaintPipeline.from_pretrained(repo_id, controlnet=controlnet, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                     tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
        last_model = repo_id
        last_cn_on = cn_on
        progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
@@ -113,12 +126,16 @@ def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, mode
            safetensors_file = download_file_mod(repo_id)
            transformer = FluxTransformer2DModel.from_single_file(safetensors_file, subfolder="transformer", torch_dtype=dtype, config=single_file_base_model)
            pipe = DiffusionPipeline.from_pretrained(single_file_base_model, transformer=transformer, torch_dtype=dtype, token=HF_TOKEN)
-           pipe_i2i = AutoPipelineForImage2Image.from_pretrained(single_file_base_model, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
-                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_i2i = AutoPipelineForImage2Image.from_pretrained(single_file_base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_ip = AutoPipelineForInpainting.from_pretrained(single_file_base_model, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                     tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
        else:
            pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype, token=HF_TOKEN)
-           pipe_i2i = AutoPipelineForImage2Image.from_pretrained(repo_id, vae=None, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
-                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_i2i = AutoPipelineForImage2Image.from_pretrained(repo_id, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                      tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
+           pipe_ip = AutoPipelineForInpainting.from_pretrained(repo_id, vae=good_vae, transformer=pipe.transformer, text_encoder=pipe.text_encoder,
+                     tokenizer=pipe.tokenizer, text_encoder_2=pipe.text_encoder_2, tokenizer_2=pipe.tokenizer_2, torch_dtype=dtype, token=HF_TOKEN)
        last_model = repo_id
        last_cn_on = cn_on
        progress(1, desc=f"Model loaded: {repo_id}")
@@ -128,15 +145,10 @@ def change_base_model(repo_id: str, cn_on: bool, disable_model_cache: bool, mode
        raise gr.Error(f"Model load Error: {repo_id} {e}") from e
    finally:
        if safetensors_file and Path(safetensors_file).exists(): Path(safetensors_file).unlink()
-   return gr.update(visible=True)
+   return gr.update()
 
 change_base_model.zerogpu = True
 
-def download_file_mod(url, directory=os.getcwd()):
-    path = download_hf_file(directory, url, hf_token=HF_TOKEN)
-    if not path: raise Exception(f"Download error: {url}")
-    return path
-
 def is_repo_public(repo_id: str):
    api = HfApi()
    try:
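Throughout this commit, `gr.update(visible=True)` returns are replaced with bare `gr.update()`. A bare update is Gradio's no-op: the bound output component keeps its current state instead of being forced visible on every cached-model early return. A minimal sketch of the difference:

```python
import gradio as gr

noop = gr.update()               # leaves the output component untouched
show = gr.update(visible=True)   # would force the component visible each call
```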
@@ -270,30 +282,34 @@ def randomize_loras(selected_indices, loras_state):
    random_prompt = random.choice(prompt_values)
    return selected_info_1, selected_info_2, selected_indices, lora_scale_1, lora_scale_2, lora_image_1, lora_image_2, random_prompt
 
-def download_loras_images(loras_json: list[dict]):
-    loras_json = copy.deepcopy(loras_json)
-    for i, lora in enumerate(loras_json):
+def download_loras_images(loras_json_orig: list[dict]):
+    api = HfApi(token=HF_TOKEN)
+    loras_json = []
+    for lora in loras_json_orig:
        repo = lora.get("repo", None)
-       if repo is None: continue
+       if repo is None or not api.repo_exists(repo_id=repo, token=HF_TOKEN):
+           print(f"LoRA '{repo}' does not exist.")
+           continue
        if "title" not in lora.keys() or "trigger_word" not in lora.keys() or "image" not in lora.keys():
            title, _repo, _path, trigger_word, image_def = check_custom_model(repo)
-           if "title" not in lora.keys(): loras_json[i]["title"] = title
-           if "trigger_word" not in lora.keys(): loras_json[i]["trigger_word"] = trigger_word
+           if "title" not in lora.keys(): lora["title"] = title
+           if "trigger_word" not in lora.keys(): lora["trigger_word"] = trigger_word
            if "image" not in lora.keys(): lora["image"] = image_def
        image = lora.get("image", None)
        try:
-           if not is_repo_public(repo) and image is not None and "http" in image: image = download_file_mod(image)
-           loras_json[i]["image"] = image if image else "/home/user/app/custom.png"
+           if not is_repo_public(repo) and image is not None and "http" in image and repo in image: image = download_file_mod(image)
+           lora["image"] = image if image else "/home/user/app/custom.png"
        except Exception as e:
-           print(e)
-           continue
+           print(f"Failed to download LoRA '{repo}' image '{image if image else ''}'. {e}")
+           lora["image"] = "/home/user/app/custom.png"
+       loras_json.append(lora)
    return loras_json
 
 def add_custom_lora(custom_lora, selected_indices, current_loras, gallery):
    if custom_lora:
        try:
            title, repo, path, trigger_word, image = check_custom_model(custom_lora)
-           if image is not None and "http" in image and not is_repo_public(repo):
+           if image is not None and "http" in image and not is_repo_public(repo) and repo in image:
                try:
                    image = download_file_mod(image)
                except Exception as e:
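The rewritten `download_loras_images` builds a fresh list and drops entries whose repo is missing, instead of deep-copying and mutating in place. `HfApi.repo_exists` is the documented check it relies on; a minimal sketch, assuming `HF_TOKEN` is defined as in app.py (the repo id here is made up):

```python
from huggingface_hub import HfApi

api = HfApi(token=HF_TOKEN)
if not api.repo_exists(repo_id="some-user/some-lora"):  # hypothetical repo id
    print("skipping: repo not found or not visible with this token")
```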
@@ -451,66 +467,116 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, cn_on, pr
 
 @spaces.GPU(duration=70)
 @torch.inference_mode()
-def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
-    global pipe_i2i, good_vae, controlnet, controlnet_union
+def generate_image_to_image(prompt_mash, image_input_path_dict, image_strength, is_inpaint, blur_mask, blur_factor, steps, cfg_scale, width, height, seed, cn_on, progress=gr.Progress(track_tqdm=True)):
+    global pipe_i2i, pipe_ip, good_vae, controlnet, controlnet_union
    try:
        good_vae.to("cuda")
        generator = torch.Generator(device="cuda").manual_seed(int(float(seed)))
-       image_input = load_image(image_input_path)
-
+       image_input_path = image_input_path_dict['background']
+       mask_path = image_input_path_dict['layers'][0]
+
        with calculateDuration("Generating image"):
            # Generate image
            modes, images, scales = get_control_params()
            if not cn_on or len(modes) == 0:
-               pipe_i2i.to("cuda")
-               pipe_i2i.vae = good_vae
-               image_input = load_image(image_input_path)
-               progress(0, desc="Start I2I Inference.")
-               final_image = pipe_i2i(
-                   prompt=prompt_mash,
-                   image=image_input,
-                   strength=image_strength,
-                   num_inference_steps=steps,
-                   guidance_scale=cfg_scale,
-                   width=width,
-                   height=height,
-                   generator=generator,
-                   joint_attention_kwargs={"scale": 1.0},
-                   output_type="pil",
-               ).images[0]
-               return final_image
+               if is_inpaint: # Inpainting
+                   pipe_ip.to("cuda")
+                   pipe_ip.vae = good_vae
+                   image_input = load_image(image_input_path)
+                   mask_input = load_image(mask_path)
+                   if blur_mask: mask_input = pipe_ip.mask_processor.blur(mask_input, blur_factor=blur_factor)
+                   progress(0, desc="Start Inpainting Inference.")
+                   final_image = pipe_ip(
+                       prompt=prompt_mash,
+                       image=image_input,
+                       mask_image=mask_input,
+                       strength=image_strength,
+                       num_inference_steps=steps,
+                       guidance_scale=cfg_scale,
+                       width=width,
+                       height=height,
+                       generator=generator,
+                       joint_attention_kwargs={"scale": 1.0},
+                       output_type="pil",
+                   ).images[0]
+                   return final_image
+               else:
+                   pipe_i2i.to("cuda")
+                   pipe_i2i.vae = good_vae
+                   image_input = load_image(image_input_path)
+                   progress(0, desc="Start I2I Inference.")
+                   final_image = pipe_i2i(
+                       prompt=prompt_mash,
+                       image=image_input,
+                       strength=image_strength,
+                       num_inference_steps=steps,
+                       guidance_scale=cfg_scale,
+                       width=width,
+                       height=height,
+                       generator=generator,
+                       joint_attention_kwargs={"scale": 1.0},
+                       output_type="pil",
+                   ).images[0]
+                   return final_image
            else:
-               pipe_i2i.to("cuda")
-               pipe_i2i.vae = good_vae
-               image_input = load_image(image_input_path)
-               if controlnet_union is not None: controlnet_union.to("cuda")
-               if controlnet is not None: controlnet.to("cuda")
-               pipe_i2i.enable_model_cpu_offload()
-               progress(0, desc="Start I2I Inference with ControlNet.")
-               final_image = pipe_i2i(
-                   prompt=prompt_mash,
-                   control_image=images,
-                   control_mode=modes,
-                   image=image_input,
-                   strength=image_strength,
-                   num_inference_steps=steps,
-                   guidance_scale=cfg_scale,
-                   width=width,
-                   height=height,
-                   controlnet_conditioning_scale=scales,
-                   generator=generator,
-                   joint_attention_kwargs={"scale": 1.0},
-                   output_type="pil",
-               ).images[0]
-               return final_image
+               if is_inpaint: # Inpainting
+                   pipe_ip.to("cuda")
+                   pipe_ip.vae = good_vae
+                   image_input = load_image(image_input_path)
+                   mask_input = load_image(mask_path)
+                   if blur_mask: mask_input = pipe_ip.mask_processor.blur(mask_input, blur_factor=blur_factor)
+                   if controlnet_union is not None: controlnet_union.to("cuda")
+                   if controlnet is not None: controlnet.to("cuda")
+                   pipe_ip.enable_model_cpu_offload()
+                   progress(0, desc="Start Inpainting Inference with ControlNet.")
+                   final_image = pipe_ip(
+                       prompt=prompt_mash,
+                       control_image=images,
+                       control_mode=modes,
+                       image=image_input,
+                       mask_image=mask_input,
+                       strength=image_strength,
+                       num_inference_steps=steps,
+                       guidance_scale=cfg_scale,
+                       width=width,
+                       height=height,
+                       controlnet_conditioning_scale=scales,
+                       generator=generator,
+                       joint_attention_kwargs={"scale": 1.0},
+                       output_type="pil",
+                   ).images[0]
+                   return final_image
+               else:
+                   pipe_i2i.to("cuda")
+                   pipe_i2i.vae = good_vae
+                   image_input = load_image(image_input_path)
+                   if controlnet_union is not None: controlnet_union.to("cuda")
+                   if controlnet is not None: controlnet.to("cuda")
+                   pipe_i2i.enable_model_cpu_offload()
+                   progress(0, desc="Start I2I Inference with ControlNet.")
+                   final_image = pipe_i2i(
+                       prompt=prompt_mash,
+                       control_image=images,
+                       control_mode=modes,
+                       image=image_input,
+                       strength=image_strength,
+                       num_inference_steps=steps,
+                       guidance_scale=cfg_scale,
+                       width=width,
+                       height=height,
+                       controlnet_conditioning_scale=scales,
+                       generator=generator,
+                       joint_attention_kwargs={"scale": 1.0},
+                       output_type="pil",
+                   ).images[0]
+                   return final_image
    except Exception as e:
        print(e)
        raise gr.Error(f"I2I Inference Error: {e}") from e
 
-def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
-             randomize_seed, seed, width, height, loras_state,
-             lora_json, cn_on, translate_on, progress=gr.Progress(track_tqdm=True)):
-    global pipe, pipe_i2i
+def run_lora(prompt, image_input, image_strength, task_type, blur_mask, blur_factor, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
+             randomize_seed, seed, width, height, loras_state, lora_json, cn_on, translate_on, progress=gr.Progress(track_tqdm=True)):
+    global pipe, pipe_i2i, pipe_ip
    if not selected_indices and not is_valid_lora(lora_json):
        gr.Info("LoRA isn't selected.")
        # raise gr.Error("You must select a LoRA before proceeding.")
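Because `input_image` is now a `gr.ImageEditor` with `type='filepath'`, the handler receives a dict rather than a single path: Gradio's `EditorValue` carries the uploaded `background`, the painted `layers` (the mask), and a flattened `composite`. A sketch of the shape the new code unpacks (the temp paths are made up), together with the documented `mask_processor.blur` helper that the new `blur_mask`/`blur_factor` controls gate, assuming `pipe_ip` is the inpainting pipeline from the diff:

```python
from diffusers.utils import load_image

editor_value = {                                     # gr.ImageEditor output
    "background": "/tmp/gradio/abc/background.png",  # hypothetical temp path
    "layers": ["/tmp/gradio/abc/layer_0.png"],       # white strokes = mask
    "composite": "/tmp/gradio/abc/composite.png",
}
mask = load_image(editor_value["layers"][0])
# Feather the mask edges; mask_processor.blur is diffusers' inpainting helper.
mask = pipe_ip.mask_processor.blur(mask, blur_factor=33)
```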
@@ -518,6 +584,16 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
    selected_loras = [loras_state[idx] for idx in selected_indices]
 
+   if task_type == "Inpainting":
+       is_inpaint = True
+       is_i2i = True
+   elif task_type == "Image-to-Image":
+       is_inpaint = False
+       is_i2i = True
+   else: # "Text-to-Image"
+       is_inpaint = False
+       is_i2i = False
+
    if translate_on: prompt = translate_to_en(prompt)
 
    # Build the prompt with trigger words
@@ -539,6 +615,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
    print(pipe.get_active_adapters()) #
    print(pipe_i2i.get_active_adapters()) #
+   print(pipe_ip.get_active_adapters()) #
 
    clear_cache() #
 
@@ -548,7 +625,10 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
    lora_weights = []
    if is_valid_lora(lora_json): # Load External LoRA weights
        with calculateDuration("Loading External LoRA weights"):
-           if image_input is not None: pipe_i2i, lora_names, lora_weights = fuse_loras(pipe_i2i, lora_json)
+           if is_inpaint:
+               pipe_ip, lora_names, lora_weights = fuse_loras(pipe_ip, lora_json)
+           elif is_i2i:
+               pipe_i2i, lora_names, lora_weights = fuse_loras(pipe_i2i, lora_json)
            else: pipe, lora_names, lora_weights = fuse_loras(pipe, lora_json)
        trigger_word = get_trigger_word(lora_json)
        prompt_mash = f"{prompt_mash} {trigger_word}"
@@ -565,7 +645,15 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
            lora_path = lora['repo']
            weight_name = lora.get("weights")
            print(f"Lora Path: {lora_path}")
-           if image_input is not None:
+           if is_inpaint:
+               pipe_ip.load_lora_weights(
+                   lora_path,
+                   weight_name=weight_name if weight_name else None,
+                   low_cpu_mem_usage=False,
+                   adapter_name=lora_name,
+                   token=HF_TOKEN
+               )
+           elif is_i2i:
                pipe_i2i.load_lora_weights(
                    lora_path,
                    weight_name=weight_name if weight_name else None,
@@ -583,13 +671,16 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
                )
    print("Loaded LoRAs:", lora_names)
    if selected_indices or is_valid_lora(lora_json):
-       if image_input is not None:
+       if is_inpaint:
+           pipe_ip.set_adapters(lora_names, adapter_weights=lora_weights)
+       elif is_i2i:
            pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
        else:
            pipe.set_adapters(lora_names, adapter_weights=lora_weights)
 
    print(pipe.get_active_adapters()) #
    print(pipe_i2i.get_active_adapters()) #
+   print(pipe_ip.get_active_adapters()) #
 
    # Set random seed for reproducibility
    with calculateDuration("Randomizing seed"):
@@ -598,8 +689,8 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
 
    # Generate image
    progress(0, desc="Running Inference.")
-   if(image_input is not None):
-       final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed, cn_on)
+   if is_i2i:
+       final_image = generate_image_to_image(prompt_mash, image_input, image_strength, is_inpaint, blur_mask, blur_factor, steps, cfg_scale, width, height, seed, cn_on)
        yield save_image(final_image, None, last_model, prompt_mash, height, width, steps, cfg_scale, seed), seed, gr.update(visible=False)
    else:
        image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, cn_on)
@@ -781,9 +872,16 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
    with gr.Row():
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
-               input_image = gr.Image(label="Input image", type="filepath", height=256, sources=["upload", "clipboard"], show_share_button=False)
                with gr.Column():
-                   image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+                   #input_image = gr.Image(label="Input image", type="filepath", height=256, sources=["upload", "clipboard"], show_share_button=False)
+                   input_image = gr.ImageEditor(label='Input image', type='filepath', sources=["upload", "clipboard"], image_mode='RGB', show_share_button=False, show_fullscreen_button=False,
+                                                layers=False, brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed", default_size=32), value=None,
+                                                canvas_size=(384, 384), width=384, height=512)
+               with gr.Column():
+                   task_type = gr.Radio(label="Task", choices=["Text-to-Image", "Image-to-Image", "Inpainting"], value="Text-to-Image")
+                   image_strength = gr.Slider(label="Strength", info="Lower means more image influence in I2I, the opposite in Inpainting", minimum=0.01, maximum=1.0, step=0.01, value=0.75)
+                   blur_mask = gr.Checkbox(label="Blur mask", value=False)
+                   blur_factor = gr.Slider(label="Blur factor", minimum=0, maximum=50, step=1, value=33)
                    input_image_preprocess = gr.Checkbox(True, label="Preprocess Input image")
            with gr.Column():
                with gr.Row():
@@ -901,7 +999,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        trigger_mode="once",
    ).success(
        fn=run_lora,
-       inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
+       inputs=[prompt, input_image, image_strength, task_type, blur_mask, blur_factor, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2,
                randomize_seed, seed, width, height, loras_state, lora_repo_json, cn_on, auto_trans],
        outputs=[result, seed, progress_bar],
        queue=True,
@@ -912,7 +1010,9 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        # outputs=history_gallery,
    ).success(save_image_history, [result, history_gallery, history_files, model_name], [history_gallery, history_files], queue=False, show_api=False)
 
-   input_image.upload(preprocess_i2i_image, [input_image, input_image_preprocess, height, width], [input_image], queue=False, show_api=False)
+   input_image.clear(lambda: gr.update(value="Text-to-Image"), None, [task_type], queue=False, show_api=False)
+   input_image.upload(preprocess_i2i_image, [input_image, input_image_preprocess, height, width], [input_image], queue=False, show_api=False)\
+       .success(lambda: gr.update(value="Image-to-Image"), None, [task_type], queue=False, show_api=False)
    gr.on(
        triggers=[model_name.change, cn_on.change],
        fn=get_t2i_model_info,
@@ -921,7 +1021,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
        queue=False,
        show_api=False,
        trigger_mode="once",
-   ).then(change_base_model, [model_name, cn_on, disable_model_cache], [result], queue=True, show_api=False)
+   )#.then(change_base_model, [model_name, cn_on, disable_model_cache, model_type], [result], queue=True, show_api=False)
    prompt_enhance.click(enhance_prompt, [prompt], [prompt], queue=False, show_api=False)
 
    gr.on(
@@ -1166,4 +1266,4 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_ca
    gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
 
 app.queue()
-app.launch()
+app.launch(ssr_mode=False)
mod.py CHANGED
@@ -14,7 +14,7 @@ from modutils import download_things
 
 
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-subprocess.run('pip cache purge', shell=True)
+#subprocess.run('pip cache purge', shell=True)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 torch.set_grad_enabled(False)
 
@@ -232,16 +232,16 @@ def set_control_union_image(i: int, mode: str, image: Image.Image | None, height
    return control_images[i]
 
 
-def preprocess_i2i_image(image_path: str, is_preprocess: bool, height: int, width: int):
+def preprocess_i2i_image(image_path_dict: dict, is_preprocess: bool, height: int, width: int):
    try:
-       if not is_preprocess: return image_path
+       if not is_preprocess: return gr.update()
+       image_path = image_path_dict['background']
        image_resolution = max(width, height)
        image = Image.open(image_path)
        image_resized = resize_image(expand2square(image.convert("RGB")), image_resolution, image_resolution, False)
-       image_resized.save(image_path)
    except Exception as e:
        raise gr.Error(f"Error: {e}")
-   return image_path
+   return gr.update(value=image_resized)
 
 
 def compose_lora_json(lorajson: list[dict], i: int, name: str, scale: float, filename: str, trigger: str):
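`preprocess_i2i_image` now pulls the background path out of the editor dict and hands the resized image back through `gr.update` instead of overwriting the uploaded file on disk. `expand2square` is imported from `modutils` and is not part of this diff; presumably it pads the image to a square so the later resize keeps the aspect ratio, along these lines (an assumption, not the actual helper):

```python
from PIL import Image

def expand2square(im: Image.Image, background=(255, 255, 255)) -> Image.Image:
    # Pad to a square canvas, centering the original image (assumed behavior).
    w, h = im.size
    if w == h:
        return im
    side = max(w, h)
    canvas = Image.new("RGB", (side, side), background)
    canvas.paste(im.convert("RGB"), ((side - w) // 2, (side - h) // 2))
    return canvas
```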
@@ -312,7 +312,8 @@ def description_ui():
    [multimodalart/flux-lora-lab](https://huggingface.co/spaces/multimodalart/flux-lora-lab),
    [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union),
    [DamarJati/FLUX.1-DEV-Canny](https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny),
-   [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator).
+   [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator),
+   [Sham786/flux-inpainting-with-lora](https://huggingface.co/spaces/Sham786/flux-inpainting-with-lora).
    """
    )