import gradio as gr
import torch
import os
import shutil
import requests
import subprocess
from subprocess import getoutput
import webbrowser
from huggingface_hub import snapshot_download, HfApi
api = HfApi()
hf_token = os.environ.get("HF_TOKEN_WITH_WRITE_PERMISSION")
is_shared_ui = "fffiloni/B-LoRa-trainer" in os.environ['SPACE_ID']
is_gpu_associated = torch.cuda.is_available()
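# Detect which GPU flavor, if any, is attached to this Space.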
if is_gpu_associated:
    gpu_info = getoutput('nvidia-smi')
    if "A10G" in gpu_info:
        which_gpu = "A10G"
    elif "T4" in gpu_info:
        which_gpu = "T4"
    else:
        which_gpu = "CPU"
else:
    which_gpu = "CPU"
def change_training_setup(training_type):
if training_type == "style" :
return 1000, 500
elif training_type == "concept" :
return 2000, 1000
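# Ask the Hub to switch this Space's hardware flavor (kept for convenience; see the commented-out call in main()).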
def swap_hardware(hf_token, hardware="cpu-basic"):
hardware_url = f"https://huggingface.co/spaces/{os.environ['SPACE_ID']}/hardware"
headers = { "authorization" : f"Bearer {hf_token}"}
body = {'flavor': hardware}
requests.post(hardware_url, json = body, headers=headers)
# Dropdown labels mapped to the Space sleep time in seconds (-1 disables sleeping).
SLEEP_TIME_SECONDS = {
    "Don't sleep": -1,
    "5 minutes": 300,
    "15 minutes": 900,
    "30 minutes": 1800,
    "1 hour": 3600,
    "10 hours": 36000,
    "24 hours": 86400,
    "48 hours": 172800,
    "72 hours": 259200,
    "1 week": 604800,
}
SLEEP_TIME_LABELS = {seconds: label for label, seconds in SLEEP_TIME_SECONDS.items()}

def swap_sleep_time(sleep_time):
    # Set the Space's sleep time on the Hub to the selected dropdown value.
    new_sleep_time = SLEEP_TIME_SECONDS[sleep_time]
    sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}/sleeptime"
    headers = {"authorization": f"Bearer {hf_token}"}
    body = {"seconds": new_sleep_time}
    requests.post(sleep_time_url, json=body, headers=headers)
def get_sleep_time():
    # Read the Space's current sleep time (gcTimeout, in seconds) from the Hub API; None means it never sleeps.
    sleep_time_url = f"https://huggingface.co/api/spaces/{os.environ['SPACE_ID']}"
    headers = {"authorization": f"Bearer {hf_token}"}
    response = requests.get(sleep_time_url, headers=headers)
    try:
        gcTimeout = response.json()['runtime']['gcTimeout']
    except (KeyError, TypeError, ValueError):
        gcTimeout = None
    return gcTimeout
def check_sleep_time():
    # On load, only reveal the training form when the Space will not fall asleep
    # mid-training, i.e. the sleep time is at least 1 hour or sleeping is disabled.
    sleep_time = get_sleep_time()
    if sleep_time is None or sleep_time >= 3600:
        sleep_time_value = SLEEP_TIME_LABELS.get(sleep_time, "Don't sleep")
        # Hide the sleep-time warning, show the training inputs.
        return sleep_time_value, gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
    else:
        sleep_time_value = SLEEP_TIME_LABELS.get(sleep_time, "5 minutes")
        # Show the sleep-time warning, keep the training inputs hidden.
        return sleep_time_value, gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
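# Shell out to `accelerate launch` with the local B-LoRA DreamBooth SDXL training script and push the result to the Hub.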
def train_dreambooth_blora_sdxl(instance_data_dir, b_lora_trained_folder, instance_prompt, max_train_steps, checkpoint_steps):
script_filename = "train_dreambooth_b-lora_sdxl.py" # Assuming it's in the same folder
command = [
"accelerate",
"launch",
script_filename, # Use the local script
"--pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0",
f"--instance_data_dir={instance_data_dir}",
f"--output_dir={b_lora_trained_folder}",
f"--instance_prompt='{instance_prompt}'",
#f"--class_prompt={class_prompt}",
f"--validation_prompt={instance_prompt} in {instance_prompt} style",
"--num_validation_images=1",
"--validation_epochs=500",
"--resolution=1024",
"--rank=64",
"--train_batch_size=1",
"--learning_rate=5e-5",
"--lr_scheduler=constant",
"--lr_warmup_steps=0",
f"--max_train_steps={max_train_steps}",
f"--checkpointing_steps={checkpoint_steps}",
"--seed=0",
"--gradient_checkpointing",
"--use_8bit_adam",
"--mixed_precision=fp16",
"--push_to_hub",
f"--hub_token={hf_token}"
]
try:
subprocess.run(command, check=True)
print("Training is finished!")
except subprocess.CalledProcessError as e:
print(f"An error occurred: {e}")
def clear_directory(directory_path):
# Check if the directory exists
if os.path.exists(directory_path):
# Iterate over all the files and directories inside the specified directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
# Check if it is a file or a directory and remove accordingly
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path) # Remove the file
elif os.path.isdir(file_path):
shutil.rmtree(file_path) # Remove the directory
except Exception as e:
print(f'Failed to delete {file_path}. Reason: {e}')
else:
print(f'The directory {directory_path} does not exist.')
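# Validate the inputs before training and return the future model URL plus a visible status box.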
def get_start_info(image_path, b_lora_name, instance_prompt):
if is_shared_ui:
raise gr.Error("This Space only works in duplicated instances")
if not is_gpu_associated:
raise gr.Error("Please associate a T4 or A10G GPU for this Space")
    if image_path is None:
        raise gr.Error("You forgot to specify an image reference")
    if b_lora_name == "":
        raise gr.Error("You forgot to specify a name for your model")
if instance_prompt == "":
raise gr.Error("You forgot to specify an instance prompt")
your_username = api.whoami(token=hf_token)["name"]
return gr.update(visible=True, value=f"https://hf.co/{your_username}/{b_lora_name}"), gr.update(visible=True)
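# Full training flow: stage the reference image, run training, then set the Space's sleep time back to 5 minutes.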
def main(started_info, image_path, b_lora_trained_folder, instance_prompt, training_type, training_steps):
    if started_info is None or started_info == "":
        raise gr.Error("Training did not start.")
    local_dir = "image_to_train"
    # Create the working directory if needed, otherwise clear leftovers from a previous run
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    else:
        clear_directory(local_dir)
    shutil.copy(image_path, local_dir)
    print(f"Source image has been copied into the {local_dir} directory")
if training_type == "style":
checkpoint_steps = 500
elif training_type == "concept" :
checkpoint_steps = 1000
max_train_steps = training_steps
train_dreambooth_blora_sdxl(local_dir, b_lora_trained_folder, instance_prompt, max_train_steps, checkpoint_steps)
your_username = api.whoami(token=hf_token)["name"]
#swap_hardware(hardware="cpu-basic")
swap_sleep_time("5 minutes")
return f"Done, your trained model has been stored in your models library: {your_username}/{b_lora_trained_folder}"
css = """
#col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
div#warning-ready {
background-color: #ecfdf5;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
color: #057857!important;
}
div#warning-duplicate {
background-color: #ebf5ff;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
color: #0f4592!important;
}
div#warning-duplicate strong {
color: #0f4592;
}
p.actions {
display: flex;
align-items: center;
margin: 20px 0;
}
div#warning-duplicate .actions a {
display: inline-block;
margin-right: 10px;
}
div#warning-setgpu {
background-color: #fff4eb;
padding: 0 16px 16px;
margin: 20px 0;
color: #030303!important;
}
div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
color: #92220f!important;
}
div#warning-setgpu a, div#warning-setgpu b {
color: #91230f;
}
div#warning-setgpu p.actions > a {
display: inline-block;
background: #1f1f23;
border-radius: 40px;
padding: 6px 24px;
color: antiquewhite;
text-decoration: none;
font-weight: 600;
font-size: 1.2em;
}
div#warning-setsleeptime {
background-color: #fff4eb;
padding: 10px 10px;
margin: 0!important;
color: #030303!important;
}
.custom-color {
color: #030303 !important;
}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
if is_shared_ui:
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
Attention: this Space needs to be duplicated to work</h2>
<p class="main-message custom-color">
To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU (T4-small or A10G-small).<br />
A T4 costs <strong>US$0.60/h</strong>, so it should cost less than US$1 to train most models.
</p>
<p class="actions custom-color">
<a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
</a>
to start training your own B-LoRa model
</p>
</div>
''', elem_id="warning-duplicate")
else:
if(is_gpu_associated):
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
You have successfully associated a {which_gpu} GPU to the B-LoRa Training Space 🎉</h2>
<p class="custom-color">
You can now train your model! You will be billed by the minute from when you activated the GPU until it is turned off.
</p>
</div>
''', elem_id="warning-ready")
else:
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
You have successfully duplicated the B-LoRa Training Space 🎉</h2>
<p class="custom-color">There's only one step left before you can train your model: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">assign a <b>T4-small or A10G-small GPU</b> to it (via the Settings tab)</a> and run the training below.
You will be billed by the minute from when you activate the GPU until it is turned off.</p>
<p class="actions custom-color">
<a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">πŸ”₯ &nbsp; Set recommended GPU</a>
</p>
</div>
''', elem_id="warning-setgpu")
gr.Markdown("""
# B-LoRa Training UI 💭
The B-LoRa training method implicitly decomposes a single image into its style and content representations, which makes it possible to mix style and content with high quality and even to swap style and content between two stylized images.
[Learn more about Implicit Style-Content Separation using B-LoRA](https://b-lora.github.io/B-LoRA/)
""")
with gr.Row():
image = gr.Image(label="Image Reference", sources=["upload"], type="filepath")
with gr.Column():
sleep_time_message = gr.HTML('''
<div class="gr-prose">
<p>First of all, please make sure your Space's sleep time is set long enough so it does not fall asleep during training.</p>
<p>Set it to <strong>"Don't sleep"</strong> or <strong>1 hour or more</strong> to be safe.</p>
<p>Don't worry: once training is finished, the sleep time will be set back to 5 minutes.</p>
</div>
''', elem_id="warning-setsleeptime")
with gr.Group():
current_sleep_time = gr.Dropdown(
label="current space sleep time",
choices = [
"Don't sleep", "5 minutes", "15 minutes", "30 minutes", "1 hour", "10 hours", "24 hours", "48 hours", "72 hours", "1 week"
],
filterable=False
)
training_type = gr.Radio(label="Training type", choices=["style", "concept"], value="style", visible=False)
b_lora_name = gr.Textbox(label="Name your B-LoRa model", placeholder="b_lora_trained_folder", visible=False)
with gr.Row():
instance_prompt = gr.Textbox(label="Create instance prompt", info="recommended standard B-LoRa is 'A [v]' format", placeholder="A [v42]", visible=False)
#class_prompt = gr.Textbox(label="Specify class prompt", placeholder="style | person | dog ", visible=False)
training_steps = gr.Number(label="Training steps", value=1000, interactive=False, visible=False)
checkpoint_step = gr.Number(label="checkpoint step", visible=False, value=500)
train_btn = gr.Button("Train B-LoRa", visible=False)
with gr.Row():
started_info = gr.Textbox(
label="Training has started",
info="You can open this space's logs to monitor logs training; once training is finished, your model will be available here:",
visible=False
)
status = gr.Textbox(label="status", visible=False)
current_sleep_time.change(
fn = swap_sleep_time,
inputs = current_sleep_time,
outputs = None,
show_api = False
)
demo.load(
fn = check_sleep_time,
inputs = None,
outputs = [current_sleep_time, sleep_time_message, b_lora_name, instance_prompt, training_type, training_steps, train_btn],
show_api = False
)
training_type.change(
fn = change_training_setup,
inputs = [training_type],
outputs = [training_steps, checkpoint_step],
show_api = False
)
train_btn.click(
fn = get_start_info,
inputs = [image, b_lora_name, instance_prompt],
outputs = [started_info, status],
show_api = False
).then(
fn = main,
inputs = [started_info, image, b_lora_name, instance_prompt, training_type, training_steps],
outputs = [status],
show_api = False
)
demo.launch(show_api=False, debug=True, show_error=True)