import os

import gradio as gr
import spaces
import torch
from diffusers import DDIMScheduler, StableDiffusionXLInpaintPipeline, UNet2DConditionModel

hf_token = os.environ.get("HF_TOKEN")

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the BRIA 2.3 inpainting UNet and plug it into the base BRIA 2.3 SDXL pipeline.
unet = UNet2DConditionModel.from_pretrained(
    "briaai/BRIA-2.3-Inpainting",
    subfolder="unet",
    torch_dtype=torch.float16,
)
scheduler = DDIMScheduler.from_pretrained(
    "briaai/BRIA-2.3",
    subfolder="scheduler",
    rescale_betas_zero_snr=True,
    prediction_type="v_prediction",
    timestep_spacing="trailing",
    clip_sample=False,
)
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "briaai/BRIA-2.3",
    unet=unet,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    force_zeros_for_empty_prompt=False,
)
pipe = pipe.to(device)
pipe.force_zeros_for_empty_prompt = False

default_negative_prompt = (
    "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,"
    "Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,"
    "Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,"
    "Cloned face,Malformed limbs,Missing legs,Too many fingers"
)


def read_content(file_path: str) -> str:
    """Read the content of the target file."""
    with open(file_path, "r", encoding="utf-8") as f:
        content = f.read()
    return content


@spaces.GPU()
def predict(input_dict, prompt="", negative_prompt="", guidance_scale=7.5, steps=20, strength=1.0):
    # `input_dict` comes from a sketch-enabled gr.Image and holds the uploaded
    # image plus the user-drawn mask. (The original signature shadowed the
    # built-in `dict` and took an unused `scheduler` argument.)
    if negative_prompt == "":
        negative_prompt = None
    init_image = input_dict["image"].convert("RGB").resize((1024, 1024))
    mask = input_dict["mask"].convert("RGB").resize((1024, 1024))
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=init_image,
        mask_image=mask,
        guidance_scale=guidance_scale,
        num_inference_steps=int(steps),
        strength=strength,
    )
    return output.images[0]


css = '''
.gradio-container{max-width: 1100px !important}
#image_upload{min-height:400px}
#image_upload [data-testid="image"], #image_upload [data-testid="image"] > div{min-height: 400px}
#mask_radio .gr-form{background:transparent; border: none}
#word_mask{margin-top: .75em !important}
#word_mask textarea:disabled{opacity: 0.3}
.footer {margin-bottom: 45px;margin-top: 35px;text-align: center;border-bottom: 1px solid #e5e5e5}
.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px;transform: translateY(10px);background: white}
.dark .footer {border-color: #303030}
.dark .footer>p {background: #0b0f19}
.acknowledgments h4{margin: 1.25em 0 .25em 0;font-weight: bold;font-size: 115%}
#image_upload .touch-none{display: flex}
@keyframes spin {
    from { transform: rotate(0deg); }
    to { transform: rotate(360deg); }
}
#share-btn-container {padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;}
div#share-btn-container > div {flex-direction: row;background: black;align-items: center}
#share-btn-container:hover {background-color: #060606}
#share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;}
#share-btn * {all: unset}
#share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;}
#share-btn-container .wrap {display: none !important}
#share-btn-container.hidden {display: none!important}
#prompt input{width: calc(100% - 160px);border-top-right-radius: 0px;border-bottom-right-radius: 0px;}
#run_button{position:absolute;margin-top: 11px;right: 0;margin-right: 0.8em;border-bottom-left-radius: 0px; border-top-left-radius: 0px;}
#prompt-container{margin-top:-18px;}
#prompt-container .form{border-top-left-radius: 0;border-top-right-radius: 0}
#image_upload{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px}
'''

image_blocks = gr.Blocks(css=css, elem_id="total-container")
with image_blocks as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## BRIA 2.3")
        gr.HTML('''

<p>This is a demo for BRIA 2.3 inpainting. BRIA 2.3 improves the generation of humans and illustrations compared to BRIA 2.2, while still being trained on licensed data, and so provides full legal liability coverage for copyright and privacy infringement.</p>

        ''')
        with gr.Row():
            with gr.Column():
                # tool="sketch" (Gradio 3.x) makes the component return a
                # {"image": ..., "mask": ...} dict, which is what predict() expects.
                image = gr.Image(
                    source="upload",
                    tool="sketch",
                    type="pil",
                    label="Input Image",
                    interactive=True,
                    elem_id="image_upload",
                )
                with gr.Row(elem_id="prompt-container", equal_height=True):
                    with gr.Row():
                        prompt = gr.Textbox(
                            placeholder="Your prompt (what you want in place of what is erased)",
                            show_label=False,
                            elem_id="prompt",
                        )
                        btn = gr.Button("Inpaint!", elem_id="run_button")
                with gr.Accordion(label="Advanced Settings", open=False):
                    with gr.Row(equal_height=True):
                        guidance_scale = gr.Number(value=7.5, minimum=1.0, maximum=10.0, step=0.5, label="guidance_scale")
                        steps = gr.Number(value=30, minimum=20, maximum=50, step=1, label="steps")
                        strength = gr.Number(value=0.99, minimum=0.01, maximum=1.0, step=0.01, label="strength")
                    negative_prompt = gr.Textbox(
                        label="negative_prompt",
                        value=default_negative_prompt,
                        placeholder=default_negative_prompt,
                        info="what you don't want to see in the image",
                    )
            with gr.Column():
                image_out = gr.Image(label="Output", elem_id="output-img", height=400)

        # The original wiring passed the (non-component) scheduler object as an
        # input and targeted an undefined share_btn_container output; both are
        # dropped here so the handlers match predict()'s signature.
        btn.click(
            fn=predict,
            inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength],
            outputs=[image_out],
            api_name="run",
        )
        prompt.submit(
            fn=predict,
            inputs=[image, prompt, negative_prompt, guidance_scale, steps, strength],
            outputs=[image_out],
        )

        gr.Examples(
            examples=[
                ["./imgs/example.png"],
            ],
            fn=predict,
            inputs=[image],
            cache_examples=False,
        )

image_blocks.queue(max_size=25, api_open=False).launch(show_api=False)
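
# Illustrative usage sketch (an assumption, not part of the original Space): the
# pipeline can also be driven directly, e.g. for a quick smoke test without the
# UI. The file names below are hypothetical placeholders.
#
# from PIL import Image
#
# init_image = Image.open("imgs/example.png").convert("RGB").resize((1024, 1024))
# mask = Image.open("imgs/example_mask.png").convert("RGB").resize((1024, 1024))  # white = area to repaint
# result = pipe(
#     prompt="a wooden park bench",
#     negative_prompt=default_negative_prompt,
#     image=init_image,
#     mask_image=mask,
#     guidance_scale=7.5,
#     num_inference_steps=30,
#     strength=0.99,
# ).images[0]
# result.save("inpainted.png")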