Spaces: Running on Zero
File size: 5,501 Bytes
import random
import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline
from PIL import Image
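
# Prefer CUDA when it is available, otherwise fall back to CPU.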
device = "cuda" if torch.cuda.is_available() else "cpu"
repo_id = "black-forest-labs/FLUX.1-dev"
adapter_id = "alvarobartt/ghibli-characters-flux-lora"
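
# Load the FLUX.1-dev base pipeline in bfloat16, apply the Studio Ghibli
# character LoRA on top of it, and move everything to the target device.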
pipeline = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
pipeline.load_lora_weights(adapter_id)
pipeline = pipeline.to(device)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
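
# Request a ZeroGPU allocation of up to 120 seconds for each call to `inference`.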
@spaces.GPU(duration=120)
def inference(
    prompt: str,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    lora_scale: float,
    progress: gr.Progress = gr.Progress(track_tqdm=True),
):
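    # Generate a single image and return it together with the seed that was
    # actually used, so the UI can display the seed even when it was randomized.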
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
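    # `joint_attention_kwargs={"scale": ...}` scales the LoRA contribution:
    # 0.0 effectively disables the adapter, 1.0 applies it at full strength.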
    image = pipeline(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
        joint_attention_kwargs={"scale": lora_scale},
    ).images[0]
    return image, seed
examples = [
    (
        "Ghibli style futuristic stormtrooper with glossy white armor and a sleek helmet,"
        " standing heroically on a lush alien planet, vibrant flowers blooming around, soft"
        " sunlight illuminating the scene, a gentle breeze rustling the leaves"
    ),
]
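
# Minimal CSS to center the layout column and cap its width.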
css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""
with gr.Blocks(css=css) as demo:
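    # All controls live in a single centered column, styled by the CSS above.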
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 Studio Ghibli LoRA")
        gr.Markdown(
            "LoRA fine-tune of [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)"
            " with [alvarobartt/ghibli-characters](https://huggingface.co/datasets/alvarobartt/ghibli-characters)."
        )

        with gr.Accordion("How to generate nice prompts?", open=False):
            gr.Markdown(
                "What worked best for me to generate high-quality prompts was to prompt"
                " [Claude 3 Haiku](https://claude.ai) with the following:\n\nYou are an"
                " expert prompt writer for diffusion text-to-image models, and you've been provided"
                " the following prompt template:\n\n\"Ghibli style [character description] with"
                " [distinctive features], [action or pose], [environment or background],"
                " [lighting or atmosphere], [additional details].\"\n\nCould you create a prompt"
                " to generate [CHARACTER NAME] as a Studio Ghibli character following that template?"
                " [MORE DETAILS IF NEEDED]\n"
            )
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)
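        # Generation parameters are tucked into a collapsed accordion so the
        # default view stays minimal.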
with gr.Accordion("Advanced Settings", open=False):
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=42,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=768,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=3.5,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=30,
)
lora_scale = gr.Slider(
label="LoRA scale",
minimum=0.0,
maximum=1.0,
step=0.1,
value=1.0,
)
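        # Clicking the example fills the prompt and shows a pre-rendered image
        # (./example.jpg) instead of triggering a GPU run.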
        gr.Examples(
            examples=examples,
            fn=lambda x: (Image.open("./example.jpg"), 42),
            inputs=[prompt],
            outputs=[result, seed],
            run_on_click=True,
        )
        gr.Markdown(
            "### Disclaimer\n\n"
            "Both FLUX.1-dev and the Studio Ghibli dataset are released under"
            " non-commercial licenses, so this Space is free to use for personal"
            " and non-commercial purposes only."
        )
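    # Trigger inference both when the "Run" button is clicked and when the
    # prompt textbox is submitted with Enter.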
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=inference,
        inputs=[
            prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            lora_scale,
        ],
        outputs=[result, seed],
    )
demo.queue()
demo.launch()