salomonsky committed
Commit e3be785
1 Parent(s): 2fc432b

Update app.py

Files changed (1)
  1. app.py +59 -24
app.py CHANGED
@@ -12,6 +12,7 @@ from gradio_client import Client, handle_file
 from huggingface_hub import login
 from gradio_imageslider import ImageSlider
 
+
 translator = Translator()
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 basemodel = "black-forest-labs/FLUX.1-schnell"
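This hunk starts at line 12, so the file's opening import block is not shown. Judging from the names used in the diff (os, random, np, gr, AsyncInferenceClient, Translator), it presumably looks roughly like the sketch below; the translator package in particular is an assumption, since only the Translator() call is visible.

# Sketch of the imports app.py appears to rely on (not part of this commit):
import os
import random
import numpy as np
import gradio as gr
from huggingface_hub import AsyncInferenceClient   # used in generate_image()
from googletrans import Translator                 # assumption: the concrete translator library is not shown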
@@ -19,44 +20,78 @@ MAX_SEED = np.iinfo(np.int32).max
 CSS = "footer { visibility: hidden; }"
 JS = "function () { gradioURL = window.location.href; if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }"
 
-def enable_lora(lora_add): return basemodel if not lora_add else lora_add
+
+def enable_lora(lora_add):
+    return basemodel if not lora_add else lora_add
+
+
 async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
-    if seed == -1: seed = random.randint(0, MAX_SEED)
+    if seed == -1:
+        seed = random.randint(0, MAX_SEED)
     seed = int(seed)
     text = str(translator.translate(prompt, 'English')) + "," + lora_word
     client = AsyncInferenceClient()
-    try: image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
-    except Exception as e: raise gr.Error(f"Error in {e}")
+    try:
+        image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
+    except Exception as e:
+        raise gr.Error(f"Error in {e}")
     return image, seed
 
-async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, progress):
+
+async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale):
     model = enable_lora(lora_add)
     image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
     image_path = "temp_image.png"
     image.save(image_path)
-    upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
-    return upscale_image, seed
+
+    if process_upscale:
+        upscale_image = get_upscale_finegrain(prompt, image_path, upscale_factor)
+    else:
+        upscale_image = image_path
+
+    return [image_path, upscale_image]
+
 
 def get_upscale_finegrain(prompt, img_path, upscale_factor):
     client = Client("finegrain/finegrain-image-enhancer")
     result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
     return result[1]
 
-with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
-    gr.HTML("<h1><center>Flux Lab Light</center></h1>");
-    with gr.Row():
-        with gr.Column(scale=4):
-            with gr.Row(): img = gr.Image(type="filepath", label='flux Generated Image', height=600);
-            with gr.Row(): prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6); sendBtn = gr.Button(scale=1, variant='primary');
-    with gr.Accordion("Advanced Options", open=True):
-        with gr.Column(scale=1):
-            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768);
-            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024);
-            scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5);
-            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24);
-            seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1);
-            lora_add = gr.Textbox(label="Add Flux LoRA", info="Copy the HF LoRA model name here", lines=1, placeholder="Please use Warm status model");
-            lora_word = gr.Textbox(label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="");
+
+css = """
+#col-container{
+    margin: 0 auto;
+    max-width: 1024px;
+}
+"""
+
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("# Flux Upscaled")
+        gr.Markdown("Step 1: Generate image with FLUX schnell; Step 2: UpScale with Finegrain Image-Enhancer")
+        with gr.Group():
+            prompt = gr.Textbox(label="Prompt")
+            with gr.Row():
+                lora_add = gr.Textbox(label="Add Flux LoRA", info="Copy the HF LoRA model name here", lines=1, placeholder="Please use Warm status model")
+                lora_word = gr.Textbox(label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="")
+            width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768)
+            height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
+            scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
+            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
+            seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
             upscale_factor = gr.Radio(label="UpScale Factor", choices=[2, 3, 4], value=2, scale=2)
-    gr.on([prompt.submit, sendBtn.click], gen, [prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor], [img, seed])
-demo.queue(api_open=False).launch(show_api=False, share=False)
+            process_upscale = gr.Checkbox(label="Process Upscale", value=True)
+            submit_btn = gr.Button("Submit", scale=1)
+        output_res = ImageSlider(label="Flux / Upscaled")
+
+    submit_btn.click(
+        fn=lambda: None,
+        inputs=None,
+        outputs=[output_res],
+        queue=False
+    ).then(
+        fn=gen,
+        inputs=[prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor, process_upscale],
+        outputs=[output_res]
+    )
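With this change, gen() returns [image_path, upscale_image] rather than (upscale_image, seed): ImageSlider renders a two-image before/after pair, and when "Process Upscale" is unchecked both entries point to the same file. The click handler first runs fn=lambda: None, apparently to blank the slider, then chains gen via .then(). A minimal sketch of that output contract, assuming only gradio and gradio_imageslider are installed (compare() is a stand-in, not from the app):

import gradio as gr
from gradio_imageslider import ImageSlider

def compare(path):
    # app.py pairs the raw FLUX render with the Finegrain-upscaled file;
    # here the same image is returned twice just to show the output shape.
    return [path, path]

with gr.Blocks() as sketch:
    inp = gr.Image(type="filepath", label="Input")
    out = ImageSlider(label="Before / After")
    inp.change(compare, inputs=inp, outputs=out)

sketch.launch()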
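For reference, the Finegrain step can be exercised on its own with gradio_client; the call below mirrors get_upscale_finegrain() with its parameters copied from the app code (the input path and prompt are placeholders), and result[1] is the upscaled image path the app keeps.

from gradio_client import Client, handle_file

client = Client("finegrain/finegrain-image-enhancer")
result = client.predict(
    input_image=handle_file("temp_image.png"),   # placeholder input image
    prompt="a photo",                            # placeholder prompt
    negative_prompt="",
    seed=42,
    upscale_factor=2,
    controlnet_scale=0.6,
    controlnet_decay=1,
    condition_scale=6,
    tile_width=112,
    tile_height=144,
    denoise_strength=0.35,
    num_inference_steps=18,
    solver="DDIM",
    api_name="/process",
)
print(result[1])   # path to the upscaled image, the element the app returns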