Charbel Malo committed
Commit • 0398b2f
Parent(s): dbd6b54
Update app.py
app.py CHANGED
@@ -115,9 +115,11 @@ if USE_CUDA:
     else:
         USE_CUDA = False
         print("\n********** CUDA unavailable running on CPU **********\n")
+        PROVIDER = ["CPUExecutionProvider"]
 else:
     USE_CUDA = False
     print("\n********** Running on CPU **********\n")
+    PROVIDER = ["CPUExecutionProvider"]
 
 device = "cuda" if USE_CUDA else "cpu"
 EMPTY_CACHE = lambda: torch.cuda.empty_cache() if device == "cuda" else None
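The commit only defines PROVIDER at this point; presumably the list is later handed to ONNX Runtime sessions. A minimal sketch of that pattern, assuming onnxruntime is installed and using a placeholder model path:

import onnxruntime

# CPU fallback list, as set above; the CUDA path in a later hunk uses
# ["CUDAExecutionProvider", "CPUExecutionProvider"] instead.
PROVIDER = ["CPUExecutionProvider"]

# ONNX Runtime tries providers in order, so keeping CPUExecutionProvider
# last makes the same session call work whether or not CUDA is present.
session = onnxruntime.InferenceSession("model.onnx", providers=PROVIDER)  # "model.onnx" is a placeholder
print(session.get_providers())  # providers actually selected at runtime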
@@ -146,7 +148,7 @@ def load_face_swapper_model(path="./assets/pretrained_models/inswapper_128.onnx"
 def load_face_parser_model(path="./assets/pretrained_models/79999_iter.pth"):
     global FACE_PARSER
     if FACE_PARSER is None:
-        FACE_PARSER = init_parsing_model(path, device=
+        FACE_PARSER = init_parsing_model(path, device="cuda")
 
 @spaces.GPU(enable_queue=True)
 def load_nsfw_detector_model(path="./assets/pretrained_models/open-nsfw.onnx"):
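For context, init_parsing_model is this repo's face-parsing helper and takes a checkpoint path plus a device string; the commit pins it to "cuda". A device-agnostic sketch, assuming the module-level device variable ("cuda" or "cpu") computed earlier and that init_parsing_model is available from the repo's face-parsing module:

FACE_PARSER = None

def load_face_parser_model(path="./assets/pretrained_models/79999_iter.pth"):
    global FACE_PARSER
    if FACE_PARSER is None:
        # Passing the detected device instead of a literal "cuda" keeps the
        # loader usable on CPU-only machines; hardcoding "cuda" assumes this
        # only ever runs on a GPU worker.
        FACE_PARSER = init_parsing_model(path, device=device)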
@@ -155,7 +157,10 @@ def load_nsfw_detector_model(path="./assets/pretrained_models/open-nsfw.onnx"):
         NSFW_DETECTOR = NSFWChecker(model_path=path, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
 
 
+@spaces.GPU(enable_queue=True)
 load_face_analyser_model()
+
+@spaces.GPU(enable_queue=True)
 load_face_swapper_model()
 
 ## ------------------------------ MAIN PROCESS ------------------------------
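In Python, a decorator line must be followed by a function or class definition, so the usual pattern for the spaces package (Hugging Face ZeroGPU) wraps a handler rather than a bare call. An illustrative sketch with a hypothetical handler name:

import spaces

@spaces.GPU(enable_queue=True)
def run_swap(source_img, target_img):
    # The GPU is attached only while this decorated call runs, so the heavy
    # CUDA work belongs inside the function body.
    ...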
@@ -198,9 +203,22 @@ def process(
     global FACE_ANALYSER, FACE_SWAPPER, FACE_ENHANCER, FACE_PARSER, NSFW_DETECTOR
 
     WORKSPACE, OUTPUT_FILE, PREVIEW = None, None, None
-    USE_CUDA
-
-
+    if USE_CUDA:
+        available_providers = onnxruntime.get_available_providers()
+        if "CUDAExecutionProvider" in available_providers:
+            print("\n********** Running on CUDA **********\n")
+            PROVIDER = ["CUDAExecutionProvider", "CPUExecutionProvider"]
+        else:
+            USE_CUDA = False
+            print("\n********** CUDA unavailable running on CPU **********\n")
+            PROVIDER = ["CPUExecutionProvider"]
+    else:
+        USE_CUDA = False
+        print("\n********** Running on CPU **********\n")
+        PROVIDER = ["CPUExecutionProvider"]
+
+    device = "cuda" if USE_CUDA else "cpu"
+    EMPTY_CACHE = lambda: torch.cuda.empty_cache() if device == "cuda" else None
 
     # Reset models to None to reload them with GPU
     FACE_ANALYSER = None
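This hunk re-runs provider detection inside process() so the check happens on the GPU worker that @spaces.GPU allocates. The same logic, factored into a standalone helper for illustration (pick_execution_providers is a hypothetical name):

import onnxruntime

def pick_execution_providers(want_cuda):
    # Mirrors the block above: use CUDA only if this onnxruntime build has it.
    if want_cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
        return True, ["CUDAExecutionProvider", "CPUExecutionProvider"]
    return False, ["CPUExecutionProvider"]

use_cuda, providers = pick_execution_providers(want_cuda=True)
device = "cuda" if use_cuda else "cpu"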
@@ -968,6 +986,7 @@ if __name__ == "__main__":
     if USE_COLAB:
         print("Running in colab mode")
 
+    interface.queue()
     interface.launch()
 
 
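The last hunk enables Gradio's queue before launching, which queues long-running jobs instead of running them concurrently. A minimal sketch of the same launch sequence, assuming interface is a gradio Blocks object:

import gradio as gr

with gr.Blocks() as interface:
    gr.Markdown("UI definition elided")  # placeholder UI

interface.queue()   # enable request queuing for long-running handlers
interface.launch()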