johann-foerster committed
Commit · ac60dae
Parent(s): 3eb4ddb

pack models etc into build

- launch.py +1 -78
- prepare.py +71 -1
launch.py CHANGED

@@ -1,76 +1,5 @@
-import os
 import sys
-import platform
-import fooocus_version
-
-from modules.launch_util import is_installed, run, python, \
-    run_pip, repo_dir, git_clone, requirements_met, script_path, dir_repos
-from modules.model_loader import load_file_from_url
-from modules.path import modelfile_path, lorafile_path
-
-REINSTALL_ALL = False
-
-
-def prepare_environment():
-    torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
-    torch_command = os.environ.get('TORCH_COMMAND',
-                                   f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
-    requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
-
-    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
-
-    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
-    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2bc12d3d22efb5c63ae3a7fc342bb2dd16b31735")
-
-    print(f"Python {sys.version}")
-    print(f"Fooocus version: {fooocus_version.version}")
-
-    comfyui_name = 'ComfyUI-from-StabilityAI-Official'
-    git_clone(comfy_repo, repo_dir(comfyui_name), "Inference Engine", comfy_commit_hash)
-    sys.path.append(os.path.join(script_path, dir_repos, comfyui_name))
-
-    if REINSTALL_ALL or not is_installed("torch") or not is_installed("torchvision"):
-        run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
-
-    if REINSTALL_ALL or not is_installed("xformers"):
-        if platform.system() == "Windows":
-            if platform.python_version().startswith("3.10"):
-                run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
-            else:
-                print("Installation of xformers is not supported in this version of Python.")
-                print(
-                    "You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
-            if not is_installed("xformers"):
-                exit(0)
-        elif platform.system() == "Linux":
-            run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
-
-    if REINSTALL_ALL or not requirements_met(requirements_file):
-        run_pip(f"install -r \"{requirements_file}\"", "requirements")
-
-    return
-
-
-model_filenames = [
-    ('sd_xl_base_1.0_0.9vae.safetensors',
-     'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors'),
-    ('sd_xl_refiner_1.0_0.9vae.safetensors',
-     'https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors')
-]
-
-lora_filenames = [
-    ('sd_xl_offset_example-lora_1.0.safetensors',
-     'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors')
-]
-
-
-def download_models():
-    for file_name, url in model_filenames:
-        load_file_from_url(url=url, model_dir=modelfile_path, file_name=file_name)
-    for file_name, url in lora_filenames:
-        load_file_from_url(url=url, model_dir=lorafile_path, file_name=file_name)
-    return
-
+from prepare import *
 
 def cuda_malloc():
     argv = sys.argv
@@ -78,11 +7,5 @@ def cuda_malloc():
     import cuda_malloc
     sys.argv = argv
 
-
-prepare_environment()
-
 cuda_malloc()
-
-
-download_models()
 from webui import *
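After this change launch.py keeps only the cuda_malloc() workaround and the startup imports; everything else arrives via `from prepare import *`. Because prepare.py calls prepare_environment() and download_models() at module level (see the prepare.py diff below), that star import still runs the full preparation the first time launch.py starts, while the same file can also be executed on its own during the build, which is what the commit message "pack models etc into build" points at. A minimal sketch of this import-time behaviour, using hypothetical file names that are not part of this commit:

    # prepare_sketch.py -- stands in for prepare.py (hypothetical name)
    def download_models():
        print("downloading models ...")

    download_models()   # module-level call: runs the first time the module is imported

    # launch_sketch.py -- stands in for launch.py (hypothetical name)
    from prepare_sketch import *   # triggers download_models() above
    import prepare_sketch          # already in sys.modules: nothing runs again

Running `python prepare_sketch.py` at build time has the same effect as the first import, so the downloaded files can already be in place when the app later starts.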
prepare.py CHANGED

@@ -1,4 +1,74 @@
-
+import os
+import sys
+import platform
+import fooocus_version
+
+from modules.launch_util import is_installed, run, python, \
+    run_pip, repo_dir, git_clone, requirements_met, script_path, dir_repos
+from modules.model_loader import load_file_from_url
+from modules.path import modelfile_path, lorafile_path
+
+REINSTALL_ALL = False
+
+def prepare_environment():
+    torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
+    torch_command = os.environ.get('TORCH_COMMAND',
+                                   f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
+    requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+
+    xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
+
+    comfy_repo = os.environ.get('COMFY_REPO', "https://github.com/comfyanonymous/ComfyUI")
+    comfy_commit_hash = os.environ.get('COMFY_COMMIT_HASH', "2bc12d3d22efb5c63ae3a7fc342bb2dd16b31735")
+
+    print(f"Python {sys.version}")
+    print(f"Fooocus version: {fooocus_version.version}")
+
+    comfyui_name = 'ComfyUI-from-StabilityAI-Official'
+    git_clone(comfy_repo, repo_dir(comfyui_name), "Inference Engine", comfy_commit_hash)
+    sys.path.append(os.path.join(script_path, dir_repos, comfyui_name))
+
+    if REINSTALL_ALL or not is_installed("torch") or not is_installed("torchvision"):
+        run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
+
+    if REINSTALL_ALL or not is_installed("xformers"):
+        if platform.system() == "Windows":
+            if platform.python_version().startswith("3.10"):
+                run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
+            else:
+                print("Installation of xformers is not supported in this version of Python.")
+                print(
+                    "You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
+            if not is_installed("xformers"):
+                exit(0)
+        elif platform.system() == "Linux":
+            run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
+
+    if REINSTALL_ALL or not requirements_met(requirements_file):
+        run_pip(f"install -r \"{requirements_file}\"", "requirements")
+
+    return
+
+
+model_filenames = [
+    ('sd_xl_base_1.0_0.9vae.safetensors',
+     'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0_0.9vae.safetensors'),
+    ('sd_xl_refiner_1.0_0.9vae.safetensors',
+     'https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0_0.9vae.safetensors')
+]
+
+lora_filenames = [
+    ('sd_xl_offset_example-lora_1.0.safetensors',
+     'https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors')
+]
+
+
+def download_models():
+    for file_name, url in model_filenames:
+        load_file_from_url(url=url, model_dir=modelfile_path, file_name=file_name)
+    for file_name, url in lora_filenames:
+        load_file_from_url(url=url, model_dir=lorafile_path, file_name=file_name)
+    return
 
 prepare_environment()
 download_models()
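download_models() walks the two (file_name, url) lists and delegates each pair to load_file_from_url from modules.model_loader, whose implementation is not part of this diff. As an illustration only, and not the actual modules.model_loader code, such a helper typically creates the target directory, skips files that are already present, and otherwise downloads the URL to model_dir/file_name; a minimal standalone sketch using urllib:

    import os
    import urllib.request

    def load_file_from_url_sketch(url, model_dir, file_name):
        # Illustrative stand-in for modules.model_loader.load_file_from_url
        # (assumed behaviour: cache-aware download into model_dir/file_name).
        os.makedirs(model_dir, exist_ok=True)
        target = os.path.join(model_dir, file_name)
        if not os.path.exists(target):
            print(f'Downloading "{url}" to {target}')
            urllib.request.urlretrieve(url, target)
        return target

If the real helper is cache-aware in this way, running prepare.py during the build and importing it again at launch will not download the SDXL checkpoints and the offset LoRA a second time.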