import spaces
import gradio as gr
from PIL import Image
import torch
from diffusion import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline(device)


def read_content(file_path: str) -> str:
    """Read the content of the target file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content


def split_editor_value(value):
    """Split a gr.ImageEditor value ({"background", "layers", "composite"}) into
    the shadow image and an RGB mask derived from the drawn layer's alpha channel."""
    lq = value["background"].convert("RGB")
    if value["layers"]:
        # Assumption: the shadow mask is painted on the first brush layer.
        alpha = value["layers"][0].split()[-1]
        mask = Image.merge("RGB", (alpha, alpha, alpha))
    else:
        mask = Image.new("RGB", lq.size, 0)
    mask = mask.resize(lq.size, resample=Image.NEAREST)
    return lq, mask


@spaces.GPU
def predict(input, dkernel, diffusion_step):
    lq, mask = split_editor_value(input)
    output = pipe(lq=lq, mask=mask, dkernel=dkernel, diffusion_step=diffusion_step)
    return output


@spaces.GPU
def qpredict(input, dkernel, diffusion_step):
    lq, mask = split_editor_value(input)
    # quick_solve yields intermediate results, which are streamed to the output image.
    for output in pipe.quick_solve(lq=lq, mask=mask, dkernel=dkernel, diffusion_step=diffusion_step):
        yield output


css = '''
.container {max-width: 1150px;margin: auto;padding-top: 1.5rem}
#image_upload{min-height:400px}
#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
#mask_radio .gr-form{background:transparent; border: none}
#word_mask{margin-top: .75em !important}
#word_mask textarea:disabled{opacity: 0.3}
.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
.dark .footer {border-color: #303030}
.dark .footer>p {background: #0b0f19}
.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
#image_upload .touch-none{display: flex}
@keyframes spin {
    from { transform: rotate(0deg); }
    to { transform: rotate(360deg); }
}
'''

image_blocks = gr.Blocks(css=css)
with image_blocks as demo:
    gr.HTML(read_content("header.html"))
    with gr.Group():
        with gr.Group():
            with gr.Row():
                with gr.Column():
                    # gr.ImageEditor replaces the old gr.Image(source='upload', tool='sketch') component.
                    image = gr.ImageEditor(
                        type="pil",
                        crop_size="1:1",
                        elem_id="image_upload",
                        label="Shadow Image",
                        height=400,
                    )
                    dkernel = gr.Slider(minimum=11, maximum=55, step=2, value=11, label="Dilation Kernel Size")
                    diffusion_step = gr.Slider(minimum=10, maximum=200, step=5, value=20, label="Diffusion Time Step")
                    with gr.Row(elem_id="prompt-container", equal_height=True):
                        with gr.Column():
                            btn = gr.Button("Removal")
                        with gr.Column():
                            qbtn = gr.Button("Quick Removal")
                with gr.Column():
                    image_out = gr.Image(label="Removal Result", elem_id="output-img")

            with gr.Row():
                gr.Examples(
                    examples=[
                        'examples/lssd2025.jpg',
                        'examples/web-shadow0248.jpg',
                    ],
                    inputs=[image],
                )

    btn.click(fn=predict, inputs=[image, dkernel, diffusion_step], outputs=[image_out])
    qbtn.click(fn=qpredict, inputs=[image, dkernel, diffusion_step], outputs=[image_out])

# launch(enable_queue=True) is no longer supported; queue() enables the request queue
# required for streaming the generator output of qpredict.
image_blocks.queue().launch()
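
# Minimal sketch of calling the pipeline without the UI (kept as a comment so the
# Space only runs the Gradio app). It assumes the bundled example image exists and,
# as an illustration only, treats the whole frame as shadowed; pipe() is called with
# the same keyword arguments used in predict() above:
#
#   lq = Image.open("examples/lssd2025.jpg").convert("RGB")
#   mask = Image.new("RGB", lq.size, (255, 255, 255))  # hypothetical all-white mask
#   result = pipe(lq=lq, mask=mask, dkernel=11, diffusion_step=20)
#   result.save("removal_result.png")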