Tomas Jankauskas hysts HF staff committed on
Commit
fcdc6dd
·
0 Parent(s):

Duplicate from hysts/SD-XL

Browse files

Co-authored-by: hysts <hysts@users.noreply.huggingface.co>

Files changed (11) hide show
  1. .gitattributes +35 -0
  2. .gitignore +162 -0
  3. .pre-commit-config.yaml +46 -0
  4. .style.yapf +5 -0
  5. .vscode/settings.json +18 -0
  6. LICENSE +21 -0
  7. README.md +17 -0
  8. app.py +334 -0
  9. notebook.ipynb +119 -0
  10. requirements.txt +7 -0
  11. style.css +16 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio_cached_examples/
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # C extensions
9
+ *.so
10
+
11
+ # Distribution / packaging
12
+ .Python
13
+ build/
14
+ develop-eggs/
15
+ dist/
16
+ downloads/
17
+ eggs/
18
+ .eggs/
19
+ lib/
20
+ lib64/
21
+ parts/
22
+ sdist/
23
+ var/
24
+ wheels/
25
+ share/python-wheels/
26
+ *.egg-info/
27
+ .installed.cfg
28
+ *.egg
29
+ MANIFEST
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+ cover/
55
+
56
+ # Translations
57
+ *.mo
58
+ *.pot
59
+
60
+ # Django stuff:
61
+ *.log
62
+ local_settings.py
63
+ db.sqlite3
64
+ db.sqlite3-journal
65
+
66
+ # Flask stuff:
67
+ instance/
68
+ .webassets-cache
69
+
70
+ # Scrapy stuff:
71
+ .scrapy
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ .pybuilder/
78
+ target/
79
+
80
+ # Jupyter Notebook
81
+ .ipynb_checkpoints
82
+
83
+ # IPython
84
+ profile_default/
85
+ ipython_config.py
86
+
87
+ # pyenv
88
+ # For a library or package, you might want to ignore these files since the code is
89
+ # intended to run in multiple environments; otherwise, check them in:
90
+ # .python-version
91
+
92
+ # pipenv
93
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
95
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
96
+ # install all needed dependencies.
97
+ #Pipfile.lock
98
+
99
+ # poetry
100
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
102
+ # commonly ignored for libraries.
103
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104
+ #poetry.lock
105
+
106
+ # pdm
107
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108
+ #pdm.lock
109
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110
+ # in version control.
111
+ # https://pdm.fming.dev/#use-with-ide
112
+ .pdm.toml
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.2.0
4
+ hooks:
5
+ - id: check-executables-have-shebangs
6
+ - id: check-json
7
+ - id: check-merge-conflict
8
+ - id: check-shebang-scripts-are-executable
9
+ - id: check-toml
10
+ - id: check-yaml
11
+ - id: double-quote-string-fixer
12
+ - id: end-of-file-fixer
13
+ - id: mixed-line-ending
14
+ args: ['--fix=lf']
15
+ - id: requirements-txt-fixer
16
+ - id: trailing-whitespace
17
+ - repo: https://github.com/myint/docformatter
18
+ rev: v1.4
19
+ hooks:
20
+ - id: docformatter
21
+ args: ['--in-place']
22
+ - repo: https://github.com/pycqa/isort
23
+ rev: 5.12.0
24
+ hooks:
25
+ - id: isort
26
+ - repo: https://github.com/pre-commit/mirrors-mypy
27
+ rev: v0.991
28
+ hooks:
29
+ - id: mypy
30
+ args: ['--ignore-missing-imports']
31
+ additional_dependencies: ['types-python-slugify']
32
+ - repo: https://github.com/google/yapf
33
+ rev: v0.32.0
34
+ hooks:
35
+ - id: yapf
36
+ args: ['--parallel', '--in-place']
37
+ - repo: https://github.com/kynan/nbstripout
38
+ rev: 0.6.0
39
+ hooks:
40
+ - id: nbstripout
41
+ args: ['--extra-keys', 'metadata.interpreter metadata.kernelspec cell.metadata.pycharm']
42
+ - repo: https://github.com/nbQA-dev/nbQA
43
+ rev: 1.7.0
44
+ hooks:
45
+ - id: nbqa-isort
46
+ - id: nbqa-yapf
.style.yapf ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [style]
2
+ based_on_style = pep8
3
+ blank_line_before_nested_class_or_def = false
4
+ spaces_before_comment = 2
5
+ split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "python.linting.enabled": true,
3
+ "python.linting.flake8Enabled": true,
4
+ "python.linting.pylintEnabled": false,
5
+ "python.linting.lintOnSave": true,
6
+ "python.formatting.provider": "yapf",
7
+ "python.formatting.yapfArgs": [
8
+ "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
9
+ ],
10
+ "[python]": {
11
+ "editor.formatOnType": true,
12
+ "editor.codeActionsOnSave": {
13
+ "source.organizeImports": true
14
+ }
15
+ },
16
+ "editor.formatOnSave": true,
17
+ "files.insertFinalNewline": true
18
+ }
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 hysts
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: SD-XL
3
+ emoji: 🌍
4
+ colorFrom: gray
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 3.40.1
8
+ app_file: app.py
9
+ license: mit
10
+ pinned: false
11
+ suggested_hardware: a10g-small
12
+ duplicated_from: hysts/SD-XL
13
+ ---
14
+
15
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
16
+
17
+ https://arxiv.org/abs/2307.01952
app.py ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+ import random
7
+
8
+ import gradio as gr
9
+ import numpy as np
10
+ import PIL.Image
11
+ import torch
12
+ from diffusers import DiffusionPipeline
13
+
14
+ DESCRIPTION = '# SD-XL'
15
+ if not torch.cuda.is_available():
16
+ DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
17
+
18
+ MAX_SEED = np.iinfo(np.int32).max
19
+ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
20
+ 'CACHE_EXAMPLES') == '1'
21
+ MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
22
+ USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
23
+ ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
24
+ ENABLE_REFINER = os.getenv('ENABLE_REFINER', '1') == '1'
25
+
26
+ device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
27
+ if torch.cuda.is_available():
28
+ pipe = DiffusionPipeline.from_pretrained(
29
+ 'stabilityai/stable-diffusion-xl-base-1.0',
30
+ torch_dtype=torch.float16,
31
+ use_safetensors=True,
32
+ variant='fp16')
33
+ if ENABLE_REFINER:
34
+ refiner = DiffusionPipeline.from_pretrained(
35
+ 'stabilityai/stable-diffusion-xl-refiner-1.0',
36
+ torch_dtype=torch.float16,
37
+ use_safetensors=True,
38
+ variant='fp16')
39
+
40
+ if ENABLE_CPU_OFFLOAD:
41
+ pipe.enable_model_cpu_offload()
42
+ if ENABLE_REFINER:
43
+ refiner.enable_model_cpu_offload()
44
+ else:
45
+ pipe.to(device)
46
+ if ENABLE_REFINER:
47
+ refiner.to(device)
48
+
49
+ if USE_TORCH_COMPILE:
50
+ pipe.unet = torch.compile(pipe.unet,
51
+ mode='reduce-overhead',
52
+ fullgraph=True)
53
+ if ENABLE_REFINER:
54
+ refiner.unet = torch.compile(refiner.unet,
55
+ mode='reduce-overhead',
56
+ fullgraph=True)
57
+ else:
58
+ pipe = None
59
+ refiner = None
60
+
61
+
62
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
63
+ if randomize_seed:
64
+ seed = random.randint(0, MAX_SEED)
65
+ return seed
66
+
67
+
68
+ def generate(prompt: str,
69
+ negative_prompt: str = '',
70
+ prompt_2: str = '',
71
+ negative_prompt_2: str = '',
72
+ use_negative_prompt: bool = False,
73
+ use_prompt_2: bool = False,
74
+ use_negative_prompt_2: bool = False,
75
+ seed: int = 0,
76
+ width: int = 1024,
77
+ height: int = 1024,
78
+ guidance_scale_base: float = 5.0,
79
+ guidance_scale_refiner: float = 5.0,
80
+ num_inference_steps_base: int = 50,
81
+ num_inference_steps_refiner: int = 50,
82
+ apply_refiner: bool = False) -> PIL.Image.Image:
83
+ generator = torch.Generator().manual_seed(seed)
84
+
85
+ if not use_negative_prompt:
86
+ negative_prompt = None # type: ignore
87
+ if not use_prompt_2:
88
+ prompt_2 = None # type: ignore
89
+ if not use_negative_prompt_2:
90
+ negative_prompt_2 = None # type: ignore
91
+
92
+ if not apply_refiner:
93
+ return pipe(prompt=prompt,
94
+ negative_prompt=negative_prompt,
95
+ prompt_2=prompt_2,
96
+ negative_prompt_2=negative_prompt_2,
97
+ width=width,
98
+ height=height,
99
+ guidance_scale=guidance_scale_base,
100
+ num_inference_steps=num_inference_steps_base,
101
+ generator=generator,
102
+ output_type='pil').images[0]
103
+ else:
104
+ latents = pipe(prompt=prompt,
105
+ negative_prompt=negative_prompt,
106
+ prompt_2=prompt_2,
107
+ negative_prompt_2=negative_prompt_2,
108
+ width=width,
109
+ height=height,
110
+ guidance_scale=guidance_scale_base,
111
+ num_inference_steps=num_inference_steps_base,
112
+ generator=generator,
113
+ output_type='latent').images
114
+ image = refiner(prompt=prompt,
115
+ negative_prompt=negative_prompt,
116
+ prompt_2=prompt_2,
117
+ negative_prompt_2=negative_prompt_2,
118
+ guidance_scale=guidance_scale_refiner,
119
+ num_inference_steps=num_inference_steps_refiner,
120
+ image=latents,
121
+ generator=generator).images[0]
122
+ return image
123
+
124
+
125
examples = [
    'Astronaut in a jungle, cold color palette, muted colors, detailed, 8k',
    'An astronaut riding a green horse',
]

with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value='Duplicate Space for private use',
                       elem_id='duplicate-button',
                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')

    # Prompt entry, run button, and the resulting image.
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(label='Prompt',
                             show_label=False,
                             max_lines=1,
                             placeholder='Enter your prompt',
                             container=False)
            run_button = gr.Button('Run', scale=0)
        result = gr.Image(label='Result', show_label=False)

    with gr.Accordion('Advanced options', open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label='Use negative prompt',
                                              value=False)
            use_prompt_2 = gr.Checkbox(label='Use prompt 2', value=False)
            use_negative_prompt_2 = gr.Checkbox(label='Use negative prompt 2',
                                                value=False)
        # These three fields start hidden; the checkboxes above reveal them.
        negative_prompt = gr.Text(label='Negative prompt',
                                  max_lines=1,
                                  placeholder='Enter a negative prompt',
                                  visible=False)
        prompt_2 = gr.Text(label='Prompt 2',
                           max_lines=1,
                           placeholder='Enter your prompt',
                           visible=False)
        negative_prompt_2 = gr.Text(label='Negative prompt 2',
                                    max_lines=1,
                                    placeholder='Enter a negative prompt',
                                    visible=False)

        seed = gr.Slider(label='Seed',
                         minimum=0,
                         maximum=MAX_SEED,
                         step=1,
                         value=0)
        randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
        with gr.Row():
            width = gr.Slider(label='Width',
                              minimum=256,
                              maximum=MAX_IMAGE_SIZE,
                              step=32,
                              value=1024)
            height = gr.Slider(label='Height',
                               minimum=256,
                               maximum=MAX_IMAGE_SIZE,
                               step=32,
                               value=1024)
        apply_refiner = gr.Checkbox(label='Apply refiner',
                                    value=False,
                                    visible=ENABLE_REFINER)
        with gr.Row():
            guidance_scale_base = gr.Slider(label='Guidance scale for base',
                                            minimum=1,
                                            maximum=20,
                                            step=0.1,
                                            value=5.0)
            num_inference_steps_base = gr.Slider(
                label='Number of inference steps for base',
                minimum=10,
                maximum=100,
                step=1,
                value=50)
        with gr.Row(visible=False) as refiner_params:
            guidance_scale_refiner = gr.Slider(
                label='Guidance scale for refiner',
                minimum=1,
                maximum=20,
                step=0.1,
                value=5.0)
            num_inference_steps_refiner = gr.Slider(
                label='Number of inference steps for refiner',
                minimum=10,
                maximum=100,
                step=1,
                value=50)

    gr.Examples(examples=examples,
                inputs=prompt,
                outputs=result,
                fn=generate,
                cache_examples=CACHE_EXAMPLES)

    # Each checkbox toggles the visibility of its associated field or row.
    for checkbox, target in ((use_negative_prompt, negative_prompt),
                             (use_prompt_2, prompt_2),
                             (use_negative_prompt_2, negative_prompt_2),
                             (apply_refiner, refiner_params)):
        checkbox.change(fn=lambda x: gr.update(visible=x),
                        inputs=checkbox,
                        outputs=target,
                        queue=False,
                        api_name=False)

    inputs = [
        prompt,
        negative_prompt,
        prompt_2,
        negative_prompt_2,
        use_negative_prompt,
        use_prompt_2,
        use_negative_prompt_2,
        seed,
        width,
        height,
        guidance_scale_base,
        guidance_scale_refiner,
        num_inference_steps_base,
        num_inference_steps_refiner,
        apply_refiner,
    ]
    # Every trigger first (optionally) randomizes the seed, then generates.
    # Only the main prompt's submit is exposed on the API as 'run'.
    for register, api_name in ((prompt.submit, 'run'),
                               (negative_prompt.submit, False),
                               (prompt_2.submit, False),
                               (negative_prompt_2.submit, False),
                               (run_button.click, False)):
        register(fn=randomize_seed_fn,
                 inputs=[seed, randomize_seed],
                 outputs=seed,
                 queue=False,
                 api_name=False).then(fn=generate,
                                      inputs=inputs,
                                      outputs=result,
                                      api_name=api_name)
demo.queue(max_size=20).launch()
notebook.ipynb ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {
7
+ "colab": {
8
+ "base_uri": "https://localhost:8080/"
9
+ },
10
+ "id": "-ayiROzqJeXB",
11
+ "outputId": "c7710012-c311-4203-9dc8-6333f0eb0e66"
12
+ },
13
+ "outputs": [],
14
+ "source": [
15
+ "!git clone -q https://huggingface.co/spaces/hysts/SD-XL"
16
+ ]
17
+ },
18
+ {
19
+ "cell_type": "code",
20
+ "execution_count": null,
21
+ "metadata": {
22
+ "colab": {
23
+ "base_uri": "https://localhost:8080/"
24
+ },
25
+ "id": "d-GcdYVAJmt0",
26
+ "outputId": "d7489bf9-df54-4e96-ed1e-cf392f34b890"
27
+ },
28
+ "outputs": [],
29
+ "source": [
30
+ "%cd SD-XL"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": null,
36
+ "metadata": {
37
+ "id": "vTWR-Xr6JoBJ"
38
+ },
39
+ "outputs": [],
40
+ "source": [
41
+ "!pip install -q -r requirements.txt"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": null,
47
+ "metadata": {
48
+ "id": "QSF_GqTKJsN5"
49
+ },
50
+ "outputs": [],
51
+ "source": [
52
+ "import os\n",
53
+ "\n",
54
+ "os.environ['ENABLE_REFINER'] = '0'"
55
+ ]
56
+ },
57
+ {
58
+ "cell_type": "code",
59
+ "execution_count": null,
60
+ "metadata": {
61
+ "colab": {
62
+ "base_uri": "https://localhost:8080/",
63
+ "height": 710,
64
+ "referenced_widgets": [
65
+ "68c1e33d84b94f009db258e278fe7068",
66
+ "b1b1ca6d1cc44a738c3b4b6de17f3a5b",
67
+ "104833166be14046873bfea2c1a2a887",
68
+ "32f25821a48d4c9589f58c134e3b56d7",
69
+ "3ed7cc7759074df58a91fd7fb28a4933",
70
+ "c8885bd4a35d4cdcbb6acce5c52e15e2",
71
+ "5d1d83dfd090460d9f948b71f95aaed8",
72
+ "773e06ed1d734e53a7def5305cd35131",
73
+ "753b336dbeb147349e4520715035d8da",
74
+ "c5215236213242b89a971a1095afcea5",
75
+ "bd0a6a0e16944533b59eaa3f5188e99f",
76
+ "96b1de32a367400bba75babd39bc7308",
77
+ "65291f8203964f4499a1b422af91f75e",
78
+ "0c3fad2a850b4320b47586ff4d0ac73e",
79
+ "69a6be1033c5424988a702c5d69590ee",
80
+ "b22729413d9b449a94892b91d95cf1e4",
81
+ "6c8f51c69f394eeea67eb515831f60b2",
82
+ "bb779e8367e44a939d607ace70493d94",
83
+ "4d3862b22c3245d8b3d8b6442e149c8d",
84
+ "16ef5a40c9d441aea180d1732442df97",
85
+ "db54ca7070cf43adbda196d44967464c",
86
+ "cadddb2624804c308710a219bf8cf4f3"
87
+ ]
88
+ },
89
+ "id": "4FTmJkt_J8j_",
90
+ "outputId": "850aba86-acb4-4452-bac2-28b5c815ec0f"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import app"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": null,
100
+ "metadata": {
101
+ "id": "LJbHj7yLJ9p0"
102
+ },
103
+ "outputs": [],
104
+ "source": []
105
+ }
106
+ ],
107
+ "metadata": {
108
+ "accelerator": "GPU",
109
+ "colab": {
110
+ "gpuType": "T4",
111
+ "provenance": []
112
+ },
113
+ "language_info": {
114
+ "name": "python"
115
+ }
116
+ },
117
+ "nbformat": 4,
118
+ "nbformat_minor": 0
119
+ }
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ accelerate==0.21.0
2
+ diffusers==0.19.3
3
+ gradio==3.40.1
4
+ invisible-watermark==0.2.0
5
+ Pillow==10.0.0
6
+ torch==2.0.1
7
+ transformers==4.31.0
style.css ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ }
4
+
5
+ #duplicate-button {
6
+ margin: auto;
7
+ color: #fff;
8
+ background: #1565c0;
9
+ border-radius: 100vh;
10
+ }
11
+
12
+ #component-0 {
13
+ max-width: 730px;
14
+ margin: auto;
15
+ padding-top: 1.5rem;
16
+ }