Upload 2 files
app.py CHANGED
@@ -66,6 +66,7 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
+    progress(0, desc="Start Inference.")
     with calculateDuration("Generating image"):
         # Generate image
         image = pipe(
@@ -84,6 +85,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     if selected_index is None and not is_valid_lora(lora_json):
         gr.Info("LoRA isn't selected.")
         # raise gr.Error("You must select a LoRA before proceeding.")
+    progress(0, desc="Preparing Inference.")
 
     if is_valid_lora(lora_json):
         with calculateDuration("Loading LoRA weights"):
mod.py CHANGED
@@ -78,11 +78,11 @@ def change_base_model(repo_id: str, progress=gr.Progress(track_tqdm=True)):
     global last_model
     try:
         if repo_id == last_model or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return
-        progress(0, f"Loading model: {repo_id}")
+        progress(0, desc=f"Loading model: {repo_id}")
        clear_cache()
         pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.bfloat16)
         last_model = repo_id
-        progress(1, f"Model loaded: {repo_id}")
+        progress(1, desc=f"Model loaded: {repo_id}")
     except Exception as e:
         print(e)
         return gr.update(visible=True)
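
Both files use Gradio's built-in progress reporting: the event handler receives progress=gr.Progress(track_tqdm=True) and calls it with a completion fraction plus a desc= label that is shown in the UI, which is what the added and amended progress(...) calls in this commit do. Below is a minimal standalone sketch of that pattern, assuming Gradio 4.x; run_inference, the sleep calls, and the Interface wiring are illustrative placeholders, not code from this Space.

# Minimal sketch of the progress-reporting pattern used in this commit.
# Assumes Gradio 4.x; everything except gr.Progress / desc= is illustrative.
import time
import gradio as gr

def run_inference(prompt, progress=gr.Progress(track_tqdm=True)):
    # Report status before any work starts; desc= sets the label in the UI.
    progress(0, desc="Preparing Inference.")
    time.sleep(1)  # stand-in for model/LoRA loading
    progress(0.5, desc="Start Inference.")
    time.sleep(1)  # stand-in for the actual pipe(...) call
    progress(1, desc="Done.")
    return f"Generated: {prompt}"

demo = gr.Interface(fn=run_inference, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()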