Update app.py
app.py CHANGED

@@ -67,18 +67,17 @@ def enable_lora(lora_scale, lora_in, lora_add):
     url = f'https://huggingface.co/{lora_add}/tree/main'
     lora_name = scrape_lora_link(url)
     pipe.lora_lora_weights(lora_add, weight_name=lora_name)
-    pipe.fuse_lora(lora_scale=lora_scale)
-    pipe.to(device="cuda", dtype=torch.bfloat16)
+    pipe.fuse_lora(lora_scale=lora_scale)
 
 @spaces.GPU()
 def generate_image(
-    prompt,
-    width=768,
-    height=1024,
-    scales=5,
-    steps=24,
-    seed=-1,
-    nums=1,
+    prompt:str,
+    width:int=768,
+    height:int=1024,
+    scales:float=3.5,
+    steps:int=24,
+    seed:int=-1,
+    nums:int=1,
     progress=gr.Progress(track_tqdm=True)):
 
     if seed == -1:
@@ -107,10 +106,10 @@ def generate_image(
 
 
 def gen(
-    prompt,
+    prompt:str,
     width:int=768,
     height:int=1024,
-    scales:
+    scales:float=3.5,
     steps:int=24,
     seed:int=-1,
     nums:int=1,
@@ -119,6 +118,7 @@ def gen(
     lora_add:str=""
 ):
     enable_lora(lora_scale, lora_in, lora_add)
+    pipe.to(device="cuda", dtype=torch.bfloat16)
     generate_image(prompt,width,height,scales,steps,seed,nums)
 
 
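Read as plain Python, the updated path through gen() would look roughly like the sketch below. This is a sketch under assumptions, not the repository code: the defaults for lora_scale and lora_in are not visible in the diff and are guessed here, and pipe.lora_lora_weights in the diff looks like a typo for diffusers' load_lora_weights, which is what the sketch calls.

import torch

# Sketch only: `pipe`, `scrape_lora_link`, and `generate_image` are defined
# elsewhere in app.py; values marked "assumed" are not shown in the diff.

def enable_lora(lora_scale, lora_in, lora_add):
    if lora_add:  # guard assumed; the hunk only shows the body from line 67 onward
        url = f'https://huggingface.co/{lora_add}/tree/main'
        lora_name = scrape_lora_link(url)  # weight filename scraped from the repo file listing
        # the diff reads pipe.lora_lora_weights(...), likely a typo for load_lora_weights
        pipe.load_lora_weights(lora_add, weight_name=lora_name)
        pipe.fuse_lora(lora_scale=lora_scale)  # fusing stays in enable_lora() after this commit


def gen(prompt: str,
        width: int = 768,
        height: int = 1024,
        scales: float = 3.5,
        steps: int = 24,
        seed: int = -1,
        nums: int = 1,
        lora_scale: float = 1.0,  # assumed default
        lora_in: str = "",        # assumed default
        lora_add: str = ""):
    enable_lora(lora_scale, lora_in, lora_add)
    # the commit moves this device/dtype move out of enable_lora() into gen(),
    # so it now runs on every request, right before image generation
    pipe.to(device="cuda", dtype=torch.bfloat16)
    return generate_image(prompt, width, height, scales, steps, seed, nums)

The remaining changes are annotation-only: generate_image() gains type hints, and its default guidance value moves from scales=5 to scales:float=3.5, matching the default that gen() now declares.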