Update

Changed files:
- .pre-commit-config.yaml  +59 -35
- .style.yapf              +0 -5
- .vscode/settings.json    +30 -0
- app.py                   +40 -51
- model.py                 +13 -22
- style.css                +6 -2
.pre-commit-config.yaml  CHANGED  (+59 -35)

New version:

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0
    hooks:
      - id: check-executables-have-shebangs
      - id: check-json
      - id: check-merge-conflict
      - id: check-shebang-scripts-are-executable
      - id: check-toml
      - id: check-yaml
      - id: end-of-file-fixer
      - id: mixed-line-ending
        args: ["--fix=lf"]
      - id: requirements-txt-fixer
      - id: trailing-whitespace
  - repo: https://github.com/myint/docformatter
    rev: v1.7.5
    hooks:
      - id: docformatter
        args: ["--in-place"]
  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        args: ["--profile", "black"]
  - repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.10.0
    hooks:
      - id: mypy
        args: ["--ignore-missing-imports"]
        additional_dependencies:
          [
            "types-python-slugify",
            "types-requests",
            "types-PyYAML",
            "types-pytz",
          ]
  - repo: https://github.com/psf/black
    rev: 24.4.2
    hooks:
      - id: black
        language_version: python3.10
        args: ["--line-length", "119"]
  - repo: https://github.com/kynan/nbstripout
    rev: 0.7.1
    hooks:
      - id: nbstripout
        args:
          [
            "--extra-keys",
            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
          ]
  - repo: https://github.com/nbQA-dev/nbQA
    rev: 1.8.5
    hooks:
      - id: nbqa-black
      - id: nbqa-pyupgrade
        args: ["--py37-plus"]
      - id: nbqa-isort
        args: ["--float-to-top"]
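These hooks run against staged files on every git commit once the hook script is installed. A minimal sketch of wiring them up by hand, assuming the pre-commit package is available on PATH (both subcommands are standard pre-commit CLI; the subprocess wrapper is only for illustration):

import subprocess

# Register the git hook for this clone, then run every configured hook
# once across the whole tree (useful right after adopting the config).
subprocess.run(["pre-commit", "install"], check=True)
subprocess.run(["pre-commit", "run", "--all-files"], check=True)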
.style.yapf  DELETED

Removed contents:

[style]
based_on_style = pep8
blank_line_before_nested_class_or_def = false
spaces_before_comment = 2
split_before_logical_operator = true
.vscode/settings.json  ADDED

New file:

{
    "editor.formatOnSave": true,
    "files.insertFinalNewline": false,
    "[python]": {
        "editor.defaultFormatter": "ms-python.black-formatter",
        "editor.formatOnType": true,
        "editor.codeActionsOnSave": {
            "source.organizeImports": "explicit"
        }
    },
    "[jupyter]": {
        "files.insertFinalNewline": false
    },
    "black-formatter.args": [
        "--line-length=119"
    ],
    "isort.args": ["--profile", "black"],
    "flake8.args": [
        "--max-line-length=119"
    ],
    "ruff.lint.args": [
        "--line-length=119"
    ],
    "notebook.output.scrolling": true,
    "notebook.formatOnCellExecution": true,
    "notebook.formatOnSave.enabled": true,
    "notebook.codeActionsOnSave": {
        "source.organizeImports": "explicit"
    }
}
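These editor settings mirror the commit's toolchain change: black at 119 columns (replacing the deleted yapf config) plus isort with the black profile. A hedged before/after sketch of the restyling this produces, modeled on a slider from app.py — the old file's quoting is truncated in the diff, so the "before" form is an inference from the surviving wrapped lines:

import gradio as gr

# Before: yapf-era style, one keyword argument per line, single quotes.
psi1 = gr.Slider(label='Truncation psi 1',
                 minimum=0,
                 maximum=2,
                 step=0.05,
                 value=0.7)

# After: black with --line-length=119 collapses the call onto one line
# and normalizes string literals to double quotes.
psi1 = gr.Slider(label="Truncation psi 1", minimum=0, maximum=2, step=0.05, value=0.7)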
app.py  CHANGED  (+40 -51)

New version (lines before the hunk are unchanged):

@@ -6,76 +6,65 @@ import gradio as gr

from model import Model

DESCRIPTION = "# [StyleGAN-Human](https://github.com/stylegan-human/StyleGAN-Human)"

model = Model()

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            with gr.Row():
                seed1 = gr.Number(label="Seed 1", value=6876)
                psi1 = gr.Slider(label="Truncation psi 1", minimum=0, maximum=2, step=0.05, value=0.7)
            with gr.Row():
                generate_button1 = gr.Button("Generate")
            with gr.Row():
                generated_image1 = gr.Image(label="Generated Image 1", type="numpy", height=600)

        with gr.Column():
            with gr.Row():
                seed2 = gr.Number(label="Seed 2", value=6886)
                psi2 = gr.Slider(label="Truncation psi 2", minimum=0, maximum=2, step=0.05, value=0.7)
            with gr.Row():
                generate_button2 = gr.Button("Generate")
            with gr.Row():
                generated_image2 = gr.Image(label="Generated Image 2", type="numpy", height=600)

    with gr.Row():
        with gr.Column():
            with gr.Row():
                num_frames = gr.Slider(label="Number of Intermediate Frames", minimum=0, maximum=41, step=1, value=7)
            with gr.Row():
                interpolate_button = gr.Button("Interpolate")
            with gr.Row():
                interpolated_images = gr.Gallery(label="Output Images", object_fit="scale-down")

    generate_button1.click(
        model.generate_single_image,
        inputs=[
            seed1,
            psi1,
        ],
        outputs=generated_image1,
    )
    generate_button2.click(
        model.generate_single_image,
        inputs=[
            seed2,
            psi2,
        ],
        outputs=generated_image2,
    )
    interpolate_button.click(
        model.generate_interpolated_images,
        inputs=[
            seed1,
            psi1,
            seed2,
            psi2,
            num_frames,
        ],
        outputs=interpolated_images,
    )
demo.queue(max_size=10).launch()
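Each .click() maps component values to the handler positionally: inputs=[seed1, psi1] arrives as the two positional arguments of Model.generate_single_image, and the return value is rendered into the outputs component. gr.Number delivers its value as a float, which is why model.py clips and casts the seed before use. A minimal self-contained sketch of the same wiring pattern (hypothetical handler, not from this Space):

import gradio as gr


def add(a: float, b: float) -> float:
    # Receives the values of the components listed in `inputs`, in order.
    return a + b


with gr.Blocks() as demo:
    x = gr.Number(label="x", value=1)
    y = gr.Number(label="y", value=2)
    button = gr.Button("Add")
    total = gr.Number(label="x + y")
    # The return value of `add` is written into `total`.
    button.click(add, inputs=[x, y], outputs=total)

demo.launch()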
model.py  CHANGED  (+13 -22)

New version (unchanged regions between hunks elided):

@@ -10,21 +10,19 @@ import torch.nn as nn
from huggingface_hub import hf_hub_download

app_dir = pathlib.Path(__file__).parent
submodule_dir = app_dir / "StyleGAN-Human"
sys.path.insert(0, submodule_dir.as_posix())


class Model:
    def __init__(self):
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = self.load_model("stylegan_human_v2_1024.pkl")

    def load_model(self, file_name: str) -> nn.Module:
        path = hf_hub_download("public-data/StyleGAN-Human", f"models/{file_name}")
        with open(path, "rb") as f:
            model = pickle.load(f)["G_ema"]
        model.eval()
        model.to(self.device)
        with torch.inference_mode():

@@ -34,29 +32,23 @@ class Model:
        return model

    def generate_z(self, z_dim: int, seed: int) -> torch.Tensor:
        return torch.from_numpy(np.random.RandomState(seed).randn(1, z_dim)).to(self.device).float()

    @torch.inference_mode()
    def generate_single_image(self, seed: int, truncation_psi: float) -> np.ndarray:
        seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max))

        z = self.generate_z(self.model.z_dim, seed)
        label = torch.zeros([1, self.model.c_dim], device=self.device)

        out = self.model(z, label, truncation_psi=truncation_psi, force_fp32=True)
        out = (out.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        return out[0].cpu().numpy()

    @torch.inference_mode()
    def generate_interpolated_images(
        self, seed0: int, psi0: float, seed1: int, psi1: float, num_intermediate: int
    ) -> list[np.ndarray]:
        seed0 = int(np.clip(seed0, 0, np.iinfo(np.uint32).max))
        seed1 = int(np.clip(seed1, 0, np.iinfo(np.uint32).max))

@@ -73,8 +65,7 @@ class Model:
        res = []
        for z, psi in zip(zs, psis):
            out = self.model(z, label, truncation_psi=psi, force_fp32=True)
            out = (out.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            out = out[0].cpu().numpy()
            res.append(out)
        return res
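The region between the last two hunks is unchanged and not shown; from the visible loop `for z, psi in zip(zs, psis)` it evidently builds per-frame latents and truncation values between the two seeds. A hedged sketch of such a construction — the helper name and the linear interpolation are assumptions, not the file's actual code:

import numpy as np
import torch


def lerp_latents(
    z0: torch.Tensor, z1: torch.Tensor, psi0: float, psi1: float, num_intermediate: int
) -> tuple[list[torch.Tensor], list[float]]:
    # Hypothetical helper: endpoints plus num_intermediate frames in between,
    # interpolated linearly in latent space and in truncation psi.
    steps = np.linspace(0, 1, num_intermediate + 2)
    zs = [torch.lerp(z0, z1, float(t)) for t in steps]
    psis = [psi0 + (psi1 - psi0) * float(t) for t in steps]
    return zs, psis

The repeated conversion line maps the generator's roughly [-1, 1] NCHW float output to HWC uint8: permute(0, 2, 3, 1) reorders to channels-last, * 127.5 + 128 rescales to about [0.5, 255.5], and .clamp(0, 255) bounds the result before the cast.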
style.css  CHANGED  (+6 -2)

@@ -1,7 +1,11 @@
 h1 {
   text-align: center;
-}
-img#visitor-badge {
   display: block;
+}
+
+#duplicate-button {
   margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }