DigiP-AI committed on
Commit
c7fe223
·
verified ·
1 Parent(s): d5b3011

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -36
app.py CHANGED
@@ -188,44 +188,25 @@ with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
188
 
189
  text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])
190
 
191
- with gr.Tab("Image to Prompt"):
192
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 
 
 
 
193
 
194
- # Initialize Florence model
195
- device = "cuda" if torch.cuda.is_available() else "cpu"
196
- florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
197
- florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
198
-
199
- # api_key = os.getenv("HF_READ_TOKEN")
200
-
201
- def generate_caption(image):
202
- if not isinstance(image, Image.Image):
203
- image = Image.fromarray(image)
204
 
205
- inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
206
- generated_ids = florence_model.generate(
207
- input_ids=inputs["input_ids"],
208
- pixel_values=inputs["pixel_values"],
209
- max_new_tokens=1024,
210
- early_stopping=False,
211
- do_sample=False,
212
- num_beams=3,
213
- )
214
- generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
215
- parsed_answer = florence_processor.post_process_generation(
216
- generated_text,
217
- task="<MORE_DETAILED_CAPTION>",
218
- image_size=(image.width, image.height)
219
- )
220
- prompt = parsed_answer["<MORE_DETAILED_CAPTION>"]
221
- print("\n\nGeneration completed!:"+ prompt)
222
- return prompt
223
 
224
- io = gr.Interface(generate_caption,
225
- inputs=[gr.Image(label="Input Image")],
226
- outputs = [gr.Textbox(label="Output Prompt", lines=2, show_copy_button = True),
227
- # gr.Image(label="Output Image")
228
- ]
229
- )
230
 
231
  app.launch(show_api=False, share=False)
 
188
 
189
  text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])
190
 
191
+ with gr.Tab("Flip Image"):
192
+ with gr.Row():
193
+ image_input = gr.Image(type="numpy", label="Upload Image")
194
+ image_output = gr.Image(format="png")
195
+ with gr.Row():
196
+ image_button = gr.Button("Run", variant='primary')
197
+ image_button.click(flip_image, inputs=image_input, outputs=image_output)
198
 
199
+ with gr.Tab("Image Upscaler"):
200
+ with gr.Row():
201
+ with gr.Column():
202
+ def upscale_image(input_image, radio_input):
203
+ upscale_factor = radio_input
204
+ output_image = cv2.resize(input_image, None, fx = upscale_factor, fy = upscale_factor, interpolation = cv2.INTER_CUBIC)
205
+ return output_image
206
+
207
+ radio_input = gr.Radio(label="Upscale Levels", choices=[2, 4, 6, 8, 10], value=2)
 
208
 
209
+ iface = gr.Interface(fn=upscale_image, inputs = [gr.Image(label="Input Image", interactive=True), radio_input], outputs = gr.Image(label="Upscaled Image", format="png"), title="Image Upscaler")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
 
 
 
 
 
 
 
211
 
212
  app.launch(show_api=False, share=False)