haakohu committed
Commit
44539fc
1 parent: 97a6728
This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. app.py +1 -8
  2. {deep_privacy/configs → configs}/anonymizers/FB_cse.py +0 -0
  3. {deep_privacy/configs → configs}/anonymizers/FB_cse_mask.py +0 -0
  4. {deep_privacy/configs → configs}/anonymizers/FB_cse_mask_face.py +0 -0
  5. {deep_privacy/configs → configs}/anonymizers/deep_privacy1.py +0 -0
  6. {deep_privacy/configs → configs}/anonymizers/face.py +0 -0
  7. {deep_privacy/configs → configs}/anonymizers/face_fdf128.py +0 -0
  8. {deep_privacy/configs → configs}/anonymizers/market1501/blackout.py +0 -0
  9. {deep_privacy/configs → configs}/anonymizers/market1501/person.py +0 -0
  10. {deep_privacy/configs → configs}/anonymizers/market1501/pixelation16.py +0 -0
  11. {deep_privacy/configs → configs}/anonymizers/market1501/pixelation8.py +0 -0
  12. {deep_privacy/configs → configs}/datasets/coco_cse.py +0 -0
  13. {deep_privacy/configs → configs}/datasets/fdf128.py +0 -0
  14. {deep_privacy/configs → configs}/datasets/fdf256.py +0 -0
  15. {deep_privacy/configs → configs}/datasets/fdh.py +0 -0
  16. {deep_privacy/configs → configs}/datasets/utils.py +0 -0
  17. {deep_privacy/configs → configs}/defaults.py +0 -0
  18. {deep_privacy/configs → configs}/discriminators/sg2_discriminator.py +0 -0
  19. {deep_privacy/configs → configs}/fdf/deep_privacy1.py +0 -0
  20. {deep_privacy/configs → configs}/fdf/stylegan.py +0 -0
  21. {deep_privacy/configs → configs}/fdf/stylegan_fdf128.py +0 -0
  22. {deep_privacy/configs → configs}/fdh/styleganL.py +0 -0
  23. {deep_privacy/configs → configs}/fdh/styleganL_nocse.py +0 -0
  24. {deep_privacy/configs → configs}/generators/stylegan_unet.py +0 -0
  25. deep_privacy/.gitignore +0 -54
  26. deep_privacy/CHANGELOG.md +0 -13
  27. deep_privacy/Dockerfile +0 -47
  28. deep_privacy/LICENSE +0 -201
  29. deep_privacy/anonymize.py +0 -255
  30. deep_privacy/attribute_guided_demo.py +0 -144
  31. deep_privacy/readme.md +0 -209
  32. deep_privacy/setup.py +0 -46
  33. deep_privacy/stylemc.py +0 -180
  34. deep_privacy/tools/__init__.py +0 -0
  35. deep_privacy/tools/compute_cluster_means.py +0 -47
  36. deep_privacy/tools/dryrun.py +0 -49
  37. deep_privacy/tools/inspect_dataset.py +0 -52
  38. deep_privacy/tools/show_examples.py +0 -87
  39. deep_privacy/train.py +0 -190
  40. deep_privacy/validate.py +0 -64
  41. {deep_privacy/dp2 → dp2}/__init__.py +0 -0
  42. {deep_privacy/dp2 → dp2}/anonymizer/__init__.py +0 -0
  43. {deep_privacy/dp2 → dp2}/anonymizer/anonymizer.py +0 -0
  44. {deep_privacy/dp2 → dp2}/anonymizer/histogram_match_anonymizers.py +0 -0
  45. {deep_privacy/dp2 → dp2}/data/__init__.py +0 -0
  46. {deep_privacy/dp2 → dp2}/data/build.py +0 -0
  47. {deep_privacy/dp2 → dp2}/data/datasets/__init__.py +0 -0
  48. {deep_privacy/dp2 → dp2}/data/datasets/coco_cse.py +0 -0
  49. {deep_privacy/dp2 → dp2}/data/datasets/fdf.py +0 -0
  50. {deep_privacy/dp2 → dp2}/data/datasets/fdf128_wds.py +0 -0
app.py CHANGED
@@ -1,7 +1,5 @@
  import gradio
- import sys
  import os
- from pathlib import Path
  from tops.config import instantiate
  import gradio.inputs
  os.system("pip install --upgrade pip")
@@ -9,16 +7,11 @@ os.system("pip install ftfy regex tqdm")
  os.system("pip install --no-deps git+https://github.com/openai/CLIP.git")
  os.system("pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose")
  os.system("pip install --no-deps git+https://github.com/hukkelas/DSFD-Pytorch-Inference")
- sys.path.insert(0, Path(os.getcwd(), "deep_privacy"))
  os.environ["TORCH_HOME"] = "torch_home"
  from dp2 import utils
  from gradio_demos.modules import ExampleDemo, WebcamDemo
 
- cfg_face = utils.load_config("deep_privacy/configs/anonymizers/face.py")
- for key in ["person_G_cfg", "cse_person_G_cfg", "face_G_cfg", "car_G_cfg"]:
-     if key in cfg_face.anonymizer:
-         cfg_face.anonymizer[key] = Path("deep_privacy", cfg_face.anonymizer[key])
-
+ cfg_face = utils.load_config("configs/anonymizers/face.py")
 
  anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)
 
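With `configs/` moved to the repository root, app.py no longer needs the `sys.path` insertion or the per-key rewriting of generator config paths. A minimal sketch of the resulting flow, based only on calls visible in this diff and in the deleted anonymize.py further down (the input filename is hypothetical):

```python
import numpy as np
from PIL import Image
from tops.config import instantiate
from dp2 import utils

# configs/ now sits at the repository root, so the config path is passed directly.
cfg_face = utils.load_config("configs/anonymizers/face.py")
anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)

# As in the deleted anonymize.py: images are fed as uint8 CHW torch tensors.
im = np.array(Image.open("input.png").convert("RGB"))  # hypothetical input file
im = utils.im2torch(im, to_float=False, normalize=False)[0]
out = anonymizer_face(im, truncation_value=0, multi_modal_truncation=False, amp=True)
Image.fromarray(utils.im2numpy(out)).save("output.png")
```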
{deep_privacy/configs → configs}/anonymizers/FB_cse.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/FB_cse_mask.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/FB_cse_mask_face.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/deep_privacy1.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/face.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/face_fdf128.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/market1501/blackout.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/market1501/person.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/market1501/pixelation16.py RENAMED
File without changes
{deep_privacy/configs → configs}/anonymizers/market1501/pixelation8.py RENAMED
File without changes
{deep_privacy/configs → configs}/datasets/coco_cse.py RENAMED
File without changes
{deep_privacy/configs → configs}/datasets/fdf128.py RENAMED
File without changes
{deep_privacy/configs → configs}/datasets/fdf256.py RENAMED
File without changes
{deep_privacy/configs → configs}/datasets/fdh.py RENAMED
File without changes
{deep_privacy/configs → configs}/datasets/utils.py RENAMED
File without changes
{deep_privacy/configs → configs}/defaults.py RENAMED
File without changes
{deep_privacy/configs → configs}/discriminators/sg2_discriminator.py RENAMED
File without changes
{deep_privacy/configs → configs}/fdf/deep_privacy1.py RENAMED
File without changes
{deep_privacy/configs → configs}/fdf/stylegan.py RENAMED
File without changes
{deep_privacy/configs → configs}/fdf/stylegan_fdf128.py RENAMED
File without changes
{deep_privacy/configs → configs}/fdh/styleganL.py RENAMED
File without changes
{deep_privacy/configs → configs}/fdh/styleganL_nocse.py RENAMED
File without changes
{deep_privacy/configs → configs}/generators/stylegan_unet.py RENAMED
File without changes
deep_privacy/.gitignore DELETED
@@ -1,54 +0,0 @@
- # FILES
- *.yaml
- *.pkl
- *.flist
- *.zip
- *.out
- *.npy
- *.gz
- *.ckpt
- *.pth
- *.log
- *.pyc
- *.csv
- *.yml
- *.ods
- *.ods#
- *.json
- build_docker.sh
-
- # Images / Videos
- #*.png
- #*.jpg
- *.jpeg
- *.m4a
- *.mkv
- *.mp4
-
- # Directories created by inpaintron
- .cache/
- test_examples/
- .vscode
- __pycache__
- .debug/
- **/.ipynb_checkpoints/**
- outputs/
-
-
- # From pip setup
- build/
- *.egg-info
- *.egg
- .npm/
-
- # From dockerfile
- .bash_history
- .viminfo
- .local/
- *.pickle
- *.onnx
-
-
- sbatch_files/
- figures/
- image_dump/

deep_privacy/CHANGELOG.md DELETED
@@ -1,13 +0,0 @@
- # Changelog
-
- ## 23.03.2023
- - Quality of life improvements
- - Add support for refined keypoints for the FDH dataset.
- - Add FDF128 dataset loader with webdataset.
- - Support for using the detector and anonymizer from DeepPrivacy1.
- - Update visualization of keypoints.
- - Fix bug for upsampling/downsampling in the anonymization pipeline.
- - Support for keypoint-guided face anonymization.
- - Add ViTPose + Mask R-CNN detection model for keypoint-guided full-body anonymization.
- - Set caching of detections to False by default, as it can produce unexpected behaviour. For example, using a different score threshold requires re-running the detector.
- - Add Gradio demos for face and body anonymization.

deep_privacy/Dockerfile DELETED
@@ -1,47 +0,0 @@
- FROM nvcr.io/nvidia/pytorch:22.08-py3
- ARG UID=1000
- ARG UNAME=testuser
- ARG WANDB_API_KEY
- RUN useradd -ms /bin/bash -u $UID $UNAME && \
-     mkdir -p /home/${UNAME} && \
-     chown -R $UID /home/${UNAME}
- WORKDIR /home/${UNAME}
- ENV DEBIAN_FRONTEND="noninteractive"
- ENV WANDB_API_KEY=$WANDB_API_KEY
- ENV TORCH_HOME=/home/${UNAME}/.cache
-
- # OPTIONAL - DeepPrivacy2 uses these environment variables to set directories outside the current working directory
- #ENV BASE_DATASET_DIR=/work/haakohu/datasets
- #ENV BASE_OUTPUT_DIR=/work/haakohu/outputs
- #ENV FBA_METRICS_CACHE=/work/haakohu/metrics_cache
-
- RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 qt5-default -y
- RUN pip install git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose
- COPY setup.py setup.py
- RUN pip install \
-     "numpy>=1.20" \
-     matplotlib \
-     cython \
-     tensorboard \
-     tqdm \
-     ninja==1.10.2 \
-     opencv-python==4.5.5.64 \
-     moviepy \
-     pyspng \
-     git+https://github.com/hukkelas/DSFD-Pytorch-Inference \
-     wandb \
-     termcolor \
-     git+https://github.com/hukkelas/torch_ops.git \
-     git+https://github.com/wmuron/motpy@c77f85d27e371c0a298e9a88ca99292d9b9cbe6b \
-     fast_pytorch_kmeans \
-     einops_exts \
-     einops \
-     regex \
-     setuptools==59.5.0 \
-     resize_right==0.0.2 \
-     pillow \
-     scipy==1.7.1 \
-     webdataset==0.2.26 \
-     scikit-image \
-     timm==0.6.7
- RUN pip install --no-deps torch_fidelity==0.3.0 clip@git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620

deep_privacy/LICENSE DELETED
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.

deep_privacy/anonymize.py DELETED
@@ -1,255 +0,0 @@
- import hashlib
- from typing import Optional
- import click
- import tops
- import numpy as np
- import tqdm
- import moviepy.editor as mp
- import cv2
- from tops.config import instantiate
- from pathlib import Path
- from PIL import Image
- from dp2 import utils
- from detectron2.data.detection_utils import _apply_exif_orientation
- from tops import logger
- from dp2.utils.bufferless_video_capture import BufferlessVideoCapture
-
-
- def show_video(video_path):
-     video_cap = cv2.VideoCapture(str(video_path))
-     while video_cap.isOpened():
-         ret, frame = video_cap.read()
-         cv2.imshow("Frame", frame)
-         key = cv2.waitKey(25)
-         if key == ord("q"):
-             break
-     video_cap.release()
-     cv2.destroyAllWindows()
-
-
- class ImageIndexTracker:
-
-     def __init__(self, fn) -> None:
-         self.fn = fn
-         self.idx = 0
-
-     def fl_image(self, frame):
-         self.idx += 1
-         return self.fn(frame, self.idx - 1)
-
-
- def anonymize_video(
-         video_path, output_path: Path,
-         anonymizer, visualize: bool, max_res: int,
-         start_time: int, fps: int,
-         end_time: int,
-         visualize_detection: bool,
-         track: bool,
-         synthesis_kwargs,
-         **kwargs):
-     video = mp.VideoFileClip(str(video_path))
-     if track:
-         anonymizer.initialize_tracker(video.fps)
-
-     def process_frame(frame, idx):
-         frame = np.array(resize(Image.fromarray(frame), max_res))
-         cache_id = hashlib.md5(frame).hexdigest()
-         frame = utils.im2torch(frame, to_float=False, normalize=False)[0]
-         cache_id_ = cache_id + str(idx)
-         synthesis_kwargs["cache_id"] = cache_id_
-         if visualize_detection:
-             anonymized = anonymizer.visualize_detection(frame, cache_id=cache_id_)
-         else:
-             anonymized = anonymizer(frame, **synthesis_kwargs)
-         anonymized = utils.im2numpy(anonymized)
-         if visualize:
-             cv2.imshow("frame", anonymized[:, :, ::-1])
-             key = cv2.waitKey(1)
-             if key == ord("q"):
-                 exit()
-         return anonymized
-     video: mp.VideoClip = video.subclip(start_time, end_time)
-
-     if fps is not None:
-         video = video.set_fps(fps)
-
-     video = video.fl_image(ImageIndexTracker(process_frame).fl_image)
-     if str(output_path).endswith(".avi"):
-         output_path = str(output_path).replace(".avi", ".mp4")
-     if not output_path.parent.exists():
-         output_path.parent.mkdir(parents=True)
-     video.write_videofile(str(output_path))
-
-
- def resize(frame: Image.Image, max_res):
-     if max_res is None:
-         return frame
-     f = max(*[x / max_res for x in frame.size], 1)
-     if f == 1:
-         return frame
-     new_shape = [int(x / f) for x in frame.size]
-     return frame.resize(new_shape, resample=Image.BILINEAR)
-
-
- def anonymize_image(
-         image_path, output_path: Path, visualize: bool,
-         anonymizer, max_res: int,
-         visualize_detection: bool,
-         synthesis_kwargs,
-         **kwargs):
-     with Image.open(image_path) as im:
-         im = _apply_exif_orientation(im)
-         orig_im_mode = im.mode
-
-         im = im.convert("RGB")
-         im = resize(im, max_res)
-         im = np.array(im)
-         md5_ = hashlib.md5(im).hexdigest()
-         im = utils.im2torch(np.array(im), to_float=False, normalize=False)[0]
-         synthesis_kwargs["cache_id"] = md5_
-         if visualize_detection:
-             im_ = anonymizer.visualize_detection(tops.to_cuda(im), cache_id=md5_)
-         else:
-             im_ = anonymizer(im, **synthesis_kwargs)
-         im_ = utils.im2numpy(im_)
-         if visualize:
-             while True:
-                 cv2.imshow("frame", im_[:, :, ::-1])
-                 key = cv2.waitKey(0)
-                 if key == ord("q"):
-                     break
-                 elif key == ord("u"):
-                     im_ = utils.im2numpy(anonymizer(im, **synthesis_kwargs))
-         im = Image.fromarray(im_).convert(orig_im_mode)
-         if output_path is not None:
-             output_path.parent.mkdir(exist_ok=True, parents=True)
-             im.save(output_path, optimize=False, quality=100)
-             print(f"Saved to: {output_path}")
-
-
- def anonymize_file(input_path: Path, output_path: Optional[Path], **kwargs):
-     if output_path is not None and output_path.is_file():
-         logger.warn(f"Overwriting previous file: {output_path}")
-     if tops.is_image(input_path):
-         anonymize_image(input_path, output_path, **kwargs)
-     elif tops.is_video(input_path):
-         anonymize_video(input_path, output_path, **kwargs)
-     else:
-         logger.log(f"Filepath not a video or image file: {input_path}")
-
-
- def anonymize_directory(input_dir: Path, output_dir: Path, **kwargs):
-     for childname in tqdm.tqdm(input_dir.iterdir()):
-         childpath = input_dir.joinpath(childname.name)
-         output_path = output_dir.joinpath(childname.name)
-         if not childpath.is_file():
-             anonymize_directory(childpath, output_path, **kwargs)
-         else:
-             assert childpath.is_file()
-             anonymize_file(childpath, output_path, **kwargs)
-
-
- def anonymize_webcam(
-         anonymizer, max_res: int,
-         synthesis_kwargs,
-         visualize_detection,
-         track: bool,
-         **kwargs):
-     import time
-     cap = BufferlessVideoCapture(0, width=1920, height=1080)
-     t = time.time()
-     frames = 0
-     if track:
-         anonymizer.initialize_tracker(fps=5)  # FPS used for tracking objects
-     while True:
-         # Capture frame-by-frame
-         ret, frame = cap.read()
-         frame = Image.fromarray(frame[:, :, ::-1])
-         frame = resize(frame, max_res)
-         frame = np.array(frame)
-         im = utils.im2torch(np.array(frame), to_float=False, normalize=False)[0]
-         if visualize_detection:
-             im_ = anonymizer.visualize_detection(tops.to_cuda(im))
-         else:
-             im_ = anonymizer(im, **synthesis_kwargs)
-         im_ = utils.im2numpy(im_)
-
-         frames += 1
-         delta = time.time() - t
-         fps = "?"
-         if delta > 1e-6:
-             fps = frames / delta
-             print(f"FPS: {fps:.3f}", end="\r")
-         cv2.imshow('frame', im_[:, :, ::-1])
-         if cv2.waitKey(1) & 0xFF == ord('q'):
-             break
-
-
- @click.command()
- @click.argument("config_path", type=click.Path(exists=True))
- @click.option("-i", "--input_path", help="Input path. Accepted inputs: images, videos, directories.")
- @click.option("-o", "--output_path", default=None, type=click.Path(), help="Output path to save. Can be directory or file.")
- @click.option("-v", "--visualize", default=False, is_flag=True, help="Visualize the result")
- @click.option("--max-res", default=None, type=int, help="Maximum resolution of height/width")
- @click.option("--start-time", "--st", default=0, type=int, help="Start time (second) for video anonymization")
- @click.option("--end-time", "--et", default=None, type=int, help="End time (second) for video anonymization")
- @click.option("--fps", default=None, type=int, help="FPS for anonymization")
- @click.option("--detection-score-threshold", "--dst", default=.3, type=click.FloatRange(0, 1), help="Detection threshold, threshold applied for all detection models.")
- @click.option("--visualize-detection", "--vd", default=False, is_flag=True, help="Visualize only detections without running anonymization.")
- @click.option("--multi-modal-truncation", "--mt", default=False, is_flag=True, help="Enable multi-modal truncation proposed by: https://arxiv.org/pdf/2202.12211.pdf")
- @click.option("--cache", default=False, is_flag=True, help="Enable detection caching. Will save and load detections from cache.")
- @click.option("--amp", default=True, is_flag=True, help="Use automatic mixed precision for generator forward pass")
- @click.option("-t", "--truncation_value", default=0, type=click.FloatRange(0, 1), help="Latent interpolation truncation value.")
- @click.option("--track", default=False, is_flag=True, help="Track detections over frames. Will use the same latent variable (z) for tracked identities.")
- @click.option("--seed", default=0, type=int, help="Set random seed for generating images.")
- @click.option("--person-generator", default=None, help="Config path to unconditional person generator", type=click.Path())
- @click.option("--cse-person-generator", default=None, help="Config path to CSE-guided person generator", type=click.Path())
- @click.option("--webcam", default=False, is_flag=True, help="Read image from webcam feed.")
- def anonymize_path(
-         config_path,
-         input_path,
-         output_path,
-         detection_score_threshold: float,
-         visualize_detection: bool,
-         cache: bool,
-         seed: int,
-         person_generator: str,
-         cse_person_generator: str,
-         webcam: bool,
-         **kwargs):
-     """
-     config_path: Specify the path to the anonymization model to use.
-     """
-     tops.set_seed(seed)
-     cfg = utils.load_config(config_path)
-     if person_generator is not None:
-         cfg.anonymizer.person_G_cfg = person_generator
-     if cse_person_generator is not None:
-         cfg.anonymizer.cse_person_G_cfg = cse_person_generator
-     cfg.detector.score_threshold = detection_score_threshold
-     utils.print_config(cfg)
-
-     anonymizer = instantiate(cfg.anonymizer, load_cache=cache)
-     synthesis_kwargs = ["amp", "multi_modal_truncation", "truncation_value"]
-     synthesis_kwargs = {k: kwargs.pop(k) for k in synthesis_kwargs}
-
-     kwargs["anonymizer"] = anonymizer
-     kwargs["visualize_detection"] = visualize_detection
-     kwargs["synthesis_kwargs"] = synthesis_kwargs
-     if webcam:
-         anonymize_webcam(**kwargs)
-         return
-     input_path = Path(input_path)
-     output_path = Path(output_path) if output_path is not None else None
-     if output_path is None and not kwargs["visualize"]:
-         logger.log("Output path not set. Setting visualize to True")
-         kwargs["visualize"] = True
-     if input_path.is_dir():
-         assert output_path is None or not output_path.is_file()
-         anonymize_directory(input_path, output_path, **kwargs)
-     else:
-         anonymize_file(input_path, output_path, **kwargs)
-
-
- if __name__ == "__main__":
-     anonymize_path()

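The `resize` helper above caps the longer image side at `--max-res` via a single scale factor `f = max(width/max_res, height/max_res, 1)`, so images already within the limit pass through untouched. A quick stand-alone check of that behavior (Pillow only; the sizes are illustrative):

```python
from PIL import Image

max_res = 1000
im = Image.new("RGB", (4000, 2000))              # (width, height)
f = max(*[x / max_res for x in im.size], 1)      # max(4.0, 2.0, 1) -> 4.0
out = im.resize([int(x / f) for x in im.size], resample=Image.BILINEAR)
print(out.size)                                  # (1000, 500): aspect ratio kept
```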
deep_privacy/attribute_guided_demo.py DELETED
@@ -1,144 +0,0 @@
- from collections import defaultdict
- import gradio
- import numpy as np
- import torch
- import cv2
- from PIL import Image
- from dp2 import utils
- from tops.config import instantiate
- import tops
- import gradio.inputs
- from stylemc import get_and_cache_direction, get_styles
-
-
- class GuidedDemo:
-     def __init__(self, face_anonymizer, cfg_face) -> None:
-         self.anonymizer = face_anonymizer
-         assert sum([x is not None for x in list(face_anonymizer.generators.values())]) == 1
-         self.generator = [x for x in list(face_anonymizer.generators.values()) if x is not None][0]
-         face_G_cfg = utils.load_config(cfg_face.anonymizer.face_G_cfg)
-         face_G_cfg.train.batch_size = 1
-         self.dl = instantiate(face_G_cfg.data.val.loader)
-         self.cache_dir = face_G_cfg.output_dir
-         self.precompute_edits()
-
-     def precompute_edits(self):
-         self.precomputed_edits = set()
-         for edit in self.precomputed_edits:
-             get_and_cache_direction(self.cache_dir, self.dl, self.generator, edit)
-         if self.cache_dir.joinpath("stylemc_cache").is_dir():
-             for path in self.cache_dir.joinpath("stylemc_cache").iterdir():
-                 text_prompt = path.stem.replace("_", " ")
-                 self.precomputed_edits.add(text_prompt)
-                 print(text_prompt)
-         self.edits = defaultdict(defaultdict)
-
-     def anonymize(self, img, show_boxes: bool, current_box_idx: int, current_styles, current_boxes, update_identity, edits, cache_id=None):
-         if not isinstance(img, torch.Tensor):
-             img, cache_id = pil2torch(img)
-             img = tops.to_cuda(img)
-
-         current_box_idx = current_box_idx % len(current_boxes)
-         edited_styles = [s.clone() for s in current_styles]
-         for face_idx, face_edits in edits.items():
-             for prompt, strength in face_edits.items():
-                 direction = get_and_cache_direction(self.cache_dir, self.dl, self.generator, prompt)
-                 edited_styles[int(face_idx)] += direction * strength
-             update_identity[int(face_idx)] = True
-         assert img.dtype == torch.uint8
-         img = self.anonymizer(
-             img, truncation_value=0,
-             multi_modal_truncation=True, amp=True,
-             cache_id=cache_id,
-             all_styles=edited_styles,
-             update_identity=update_identity)
-         update_identity = [True for i in range(len(update_identity))]
-         img = utils.im2numpy(img)
-         if show_boxes:
-             x0, y0, x1, y1 = [int(_) for _ in current_boxes[int(current_box_idx)]]
-             img = cv2.rectangle(img, (x0, y0), (x1, y1), (255, 0, 0), 1)
-         return img, update_identity
-
-     def update_image(self, img, show_boxes):
-         img, cache_id = pil2torch(img)
-         img = tops.to_cuda(img)
-         det = self.anonymizer.detector.forward_and_cache(img, cache_id, load_cache=True)[0]
-         current_styles = []
-         for i in range(len(det)):
-             # Need to do forward pass to register all affine modules.
-             batch = det.get_crop(i, img)
-             batch["condition"] = batch["img"].float()
-
-             s = get_styles(
-                 np.random.randint(0, 999999), self.generator,
-                 batch, truncation_value=0)
-             current_styles.append(s)
-         update_identity = [True for i in range(len(det))]
-         current_boxes = np.array(det.boxes)
-         edits = defaultdict(defaultdict)
-         cur_face_idx = -1 % len(current_boxes)
-         img, update_identity = self.anonymize(img, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits, cache_id=cache_id)
-         return img, current_styles, current_boxes, update_identity, edits, cur_face_idx
-
-     def change_face(self, change, cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits):
-         cur_face_idx = (cur_face_idx + change) % len(current_boxes)
-         img, update_identity = self.anonymize(input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits)
-         return img, update_identity, cur_face_idx
-
-     def add_style(self, face_idx: int, prompt: str, strength: float, input_image, show_boxes, current_styles, current_boxes, update_identity, edits):
-         face_idx = face_idx % len(current_boxes)
-         edits[face_idx][prompt] = strength
-         img, update_identity = self.anonymize(input_image, show_boxes, face_idx, current_styles, current_boxes, update_identity, edits)
-         return img, update_identity, edits
-
-     def setup_interface(self):
-         current_styles = gradio.State()
-         current_boxes = gradio.State(None)
-         update_identity = gradio.State([])
-         edits = gradio.State([])
-         with gradio.Row():
-             input_image = gradio.Image(
-                 type="pil", label="Upload your image or try the example below!", source="webcam")
-             output_image = gradio.Image(type="numpy", label="Output")
-         with gradio.Row():
-             update_btn = gradio.Button("Update Anonymization").style(full_width=True)
-         with gradio.Row():
-             show_boxes = gradio.Checkbox(value=True, label="Show Selected")
-             cur_face_idx = gradio.Number(value=-1, label="Current", interactive=False)
-             previous = gradio.Button("Previous Person")
-             next_ = gradio.Button("Next Person")
-         with gradio.Row():
-             text_prompt = gradio.Textbox(
-                 placeholder=" | ".join(list(self.precomputed_edits)),
-                 label="Text Prompt for Edit")
-             edit_strength = gradio.Slider(0, 5, step=.01)
-             add_btn = gradio.Button("Add Edit")
-         add_btn.click(self.add_style, inputs=[cur_face_idx, text_prompt, edit_strength, input_image, show_boxes, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity, edits])
-         update_btn.click(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
-         input_image.change(self.update_image, inputs=[input_image, show_boxes], outputs=[output_image, current_styles, current_boxes, update_identity, edits, cur_face_idx])
-         previous.click(self.change_face, inputs=[gradio.State(-1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
-         next_.click(self.change_face, inputs=[gradio.State(1), cur_face_idx, current_boxes, input_image, show_boxes, current_styles, update_identity, edits], outputs=[output_image, update_identity, cur_face_idx])
-
-         show_boxes.change(self.anonymize, inputs=[input_image, show_boxes, cur_face_idx, current_styles, current_boxes, update_identity, edits], outputs=[output_image, update_identity])
-
-
- def pil2torch(img: Image.Image):
-     img = img.convert("RGB")
-     img = np.array(img)
-     img = np.rollaxis(img, 2)
-     return torch.from_numpy(img), None
-
-
- cfg_face = utils.load_config("configs/anonymizers/face.py")
- anonymizer_face = instantiate(cfg_face.anonymizer, load_cache=False)
- anonymizer_face.initialize_tracker(fps=1)
-
-
- with gradio.Blocks() as demo:
-     gradio.Markdown("# <center> DeepPrivacy2 - Realistic Image Anonymization </center>")
-     gradio.Markdown("### <center> Håkon Hukkelås, Rudolf Mester, Frank Lindseth </center>")
-     with gradio.Tab("Text-Guided Anonymization"):
-         GuidedDemo(anonymizer_face, cfg_face).setup_interface()
-
-
- demo.launch()

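attribute_guided_demo.py threads all per-session values (styles, boxes, pending edits) through `gradio.State` objects that appear in the `inputs`/`outputs` of every callback. A minimal, self-contained sketch of that pattern, using only the Blocks API the demo already imports (names here are illustrative):

```python
import gradio

with gradio.Blocks() as sketch:
    count = gradio.State(0)           # per-session value, like current_styles above
    btn = gradio.Button("Increment")
    shown = gradio.Number(label="Clicks")

    def increment(current):
        current += 1
        # The first output writes the State back; the second updates the visible component.
        return current, current

    btn.click(increment, inputs=[count], outputs=[count, shown])

# sketch.launch()
```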
deep_privacy/readme.md DELETED
@@ -1,209 +0,0 @@
- # DeepPrivacy2 - A Toolbox for Realistic Image Anonymization
- [[PDF]](http://arxiv.org/abs/2211.09454) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/haakohu/deep_privacy2)
- [[Video Demo]](https://youtu.be/Kt3au719hhk)
- [[WACV 2023 Presentation]](https://youtu.be/wwKRkkzxKuM)
-
- ![](media/g7_leaders.jpg)
- DeepPrivacy2 is a toolbox for realistic anonymization of humans, including a face and a full-body anonymizer.
-
-
- ![](media/header.png)
- DeepPrivacy2 detects and anonymizes individuals via three detection and synthesis networks: (1) a CSE-guided generator for individuals detected with dense pose (by CSE), (2) an unconditional full-body generator for cases where CSE fails to detect (note the segmented persons without color-coded CSE detections), and (3) a face generator for the remaining individuals (marked in red).
-
-
- ## What's new
-
- This repository improves over the original [DeepPrivacy](https://github.com/hukkelas/DeepPrivacy) repository with the following new features:
- - **Full-body anonymization:** Anonymize the entire human body with a single generator.
- - **Improved Face Anonymization:** Improved quality and higher resolution (256x256 vs. 128x128) face anonymization without relying on facial landmark detection.
- - **Attribute Guided Anonymization:** Anonymize faces guided by text prompts using [StyleMC](https://github.com/catlab-team/stylemc).
- - **Code cleanup and general improvements:** Extensive refactoring, bugfixes, and improvements yielding improved results and faster training.
-
- ## Installation
- ### Requirements
- - PyTorch >= 1.10
- - Torchvision >= 0.12
- - Python >= 3.8
- - CUDA-capable device for training. Training was done with 1-8 32GB V100 GPUs.
-
-
- ### Installation
- We recommend setting up and installing PyTorch with [anaconda](https://www.anaconda.com/) following the [PyTorch installation instructions](https://pytorch.org/get-started/locally/).
-
- 1. Clone the repository: `git clone https://github.com/hukkelas/deep_privacy2/`.
- 2. Install using `setup.py`:
- ```
- pip install -e .
- ```
- or:
- ```
- pip install git+https://github.com/hukkelas/deep_privacy2/
- ```
-
- ### Installation with Docker
-
- 1. Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to support GPU acceleration.
- 2. Build the docker image using the [Dockerfile](Dockerfile).
- ```bash
- # If you're not planning to train the network (or not use wandb logging), you can remove the WANDB_API_KEY argument.
- docker build -t deep_privacy2 --build-arg WANDB_API_KEY=YOUR_WANDB_KEY --build-arg UID=$(id -u) --build-arg UNAME=$(id -un) .
- ```
- 3. Run the docker image with the selected command:
- ```
- docker run --runtime=nvidia --gpus '"device=0"' --name deep_privacy2 --ipc=host -u $(id -u) -v $PWD:/home/$(id -un) --rm deep_privacy2 python3 anonymize.py configs/anonymizers/deep_privacy1.py -i media/regjeringen.jpg -o output.png
- ```
-
-
- ## Anonymization
- [anonymize.py](anonymize.py) is the main script for anonymization.
-
- The typical usage is
- ```
- python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_image.png
- ```
- where the first argument is the chosen anonymizer (see below for the different models) and the second a path to an image/folder/video.
-
- There are several optional arguments, see `python3 anonymize.py --help` for more info.
- ```
- python3 anonymize.py -h
- Usage: anonymize.py [OPTIONS] CONFIG_PATH
-
- config_path: Specify the path to the anonymization model to use.
-
- Options:
- -i, --input_path PATH Input path. Accepted inputs: images, videos,
- directories.
- -o, --output_path PATH Output path to save. Can be directory or
- file.
- --visualize Visualize the result
- --max-res INTEGER Maximum resolution of height/width
- --start-time, --st INTEGER Start time (second) for video anonymization
- --end-time, --et INTEGER End time (second) for video anonymization
- --fps INTEGER FPS for anonymization
- --detection-score-threshold FLOAT RANGE
- Detection threshold, threshold applied for
- all detection models. [0<=x<=1]
- --visualize-detection Visualize only detections without running
- anonymization.
- --multi-modal-truncation, --mt Enable multi-modal truncation proposed by:
- https://arxiv.org/pdf/2202.12211.pdf
- --no-cache Disable loading of detection cache. Will
- rerun all detections.
- --amp Use automatic mixed precision for generator
- forward pass
- -t, --truncation_value FLOAT RANGE
- Latent interpolation truncation value.
- [0<=x<=1]
- --track Track detections over frames. Will use the
- same latent variable (z) for tracked
- identities.
- --seed INTEGER Set random seed for generating images.
- --person-generator PATH Config path to unconditional person
- generator
- --cse-person-generator PATH Config path to CSE-guided person generator
- --webcam Read image from webcam feed.
- --help Show this message and exit.
-
- ```
-
- **Single image anonymization**
- ```
- python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_image.png --output_path output.png
- ```
-
- **Folder anonymization**
-
- If a folder is given as the input, all image and video files in the given folder will be anonymized and placed under --output_path. The script will duplicate the directory structure/filenames in the given folder for the output.
- ```
- python3 anonymize.py configs/anonymizers/FB_cse.py -i path/to/input/folder --output_path output_folder
- ```
-
- **Video anonymization**
- ```
- python3 anonymize.py configs/anonymizers/FB_cse.py -i path_to_video.mp4 --output_path output.mp4
- ```
-
- **Webcam anonymization**
- ```
- python3 anonymize.py configs/anonymizers/FB_cse.py --webcam
- ```
-
- ### Available anonymization models
- DeepPrivacy2 provides the following anonymization models:
-
- - [`configs/anonymizers/FB_cse.py`](configs/anonymizers/FB_cse.py): Full-body anonymizer that only anonymizes individuals detected by CSE. This provides the highest-quality anonymization; however, some individuals might not be detected by CSE.
- - [`configs/anonymizers/FB_cse_mask.py`](configs/anonymizers/FB_cse_mask.py): Full-body anonymizer that anonymizes all individuals detected by CSE or Mask R-CNN. In contrast to `configs/anonymizers/FB_cse.py`, this model anonymizes individuals not detected by CSE with an unguided generator.
- - [`configs/anonymizers/FB_cse_mask_face.py`](configs/anonymizers/FB_cse_mask_face.py): Full-body and face anonymizer that anonymizes all individuals detected by CSE, Mask R-CNN or by face detection. Compared to `configs/anonymizers/FB_cse_mask.py`, this model anonymizes individuals not detected by CSE or Mask R-CNN with a face anonymizer.
- - [`configs/anonymizers/face.py`](configs/anonymizers/face.py): The face anonymizer only anonymizes a center crop of the face.
- - [`configs/anonymizers/face_fdf128.py`](configs/anonymizers/face_fdf128.py): Same as [`configs/anonymizers/face.py`](configs/anonymizers/face.py), but the generator is trained on lower-resolution images (128x128 or lower). Recommended if you will not anonymize any faces larger than 128x128. **Model will be released soon.**
-
- ## Attribute guided anonymization
- DeepPrivacy2 allows for controllable anonymization through text prompts by adapting [StyleMC](https://github.com/catlab-team/stylemc).
- StyleMC finds global, semantically meaningful directions in the GAN latent space by manipulating images towards a given text prompt with a [CLIP](https://github.com/openai/CLIP)-based loss.
- ![](media/stylemc_example.jpg)
-
- The repository includes a [gradio](https://gradio.app/) demo for interactive text-guided anonymization.
- To use the demo, first:
-
- 1. Download the FDF256 dataset (see below). Only the validation set is required.
- 2. Run the following:
- ```
- python3 attribute_guided_demo.py
- ```
-
- The script will spin up a local webserver.
-
-
- ## Training
- First, download the dataset for training (see below).
-
- To start training, type the following:
- ```
- python3 train.py configs/fdh/styleganL.py
- ```
- The training automatically logs to [wandb](https://wandb.ai/).
-
- ### Model development utility scripts
- **Dataset inspection:** To inspect the training dataset, you can use:
- ```
- python3 -m tools.inspect_dataset configs/fdh/styleganL.py
- ```
-
- **Sanity check:**
- ```
- python3 -m tools.dryrun configs/fdh/styleganL.py
- ```
-
- **Output visualization:** To visualize output of trained models:
- ```
- python3 -m tools.show_examples configs/fdh/styleganL.py
- ```
-
-
- #### Calculating metrics
- ```
- python3 validate.py configs/fdh/styleganL.py
- ```
- **NOTE:** The metrics calculated with validate.py will slightly differ from training metrics, as validate.py disables automatic mixed precision.
-
-
- ## Dataset Setup
-
- **Setting the data directory:**
- The default dataset directory is ./data. If you want to change the dataset directory, set the environment variable `BASE_DATASET_DIR`. For example, `export BASE_DATASET_DIR=/work/data/`.
-
-
- ### FDF256
- Follow the instructions [here](https://github.com/hukkelas/FDF/blob/master/FDF256.md) to download the FDF256 dataset. The dataset should be placed in the directory `data/fdf256`.
-
- ### FDH
- Follow the instructions [here](https://www.github.com/hukkelas/FDH) to download the FDH dataset. The dataset should be placed in the directory `data/fdh`.
-
-
- ## License
- This repository is released under the [Apache 2.0 License](LICENSE), except for the following:
-
- - Code under `sg3_torch_utils/`. This code is modified from [github.com/NVlabs/stylegan2-ada-pytorch](https://github.com/NVlabs/stylegan2-ada-pytorch). A separate license is attached in the directory.
- - Detection network: See [Detectron2 License](https://github.com/facebookresearch/detectron2/blob/main/LICENSE).
- - All checkpoints follow the license of the datasets. See the respective datasets for more information.
- - Code under `dp2/detection/models/vit_pose`. This code is modified from [https://github.com/gpastal24/ViTPose-Pytorch](https://github.com/gpastal24/ViTPose-Pytorch), where code is adapted from OpenMMLab. The original license is [Apache 2.0](https://github.com/open-mmlab/mmpose/blob/master/LICENSE).

deep_privacy/setup.py DELETED
@@ -1,46 +0,0 @@
- import torch
- import torchvision
- from setuptools import setup, find_packages
-
- torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
- assert torch_ver >= [1, 9], "Requires PyTorch >= 1.9"
- torchvision_ver = [int(x) for x in torchvision.__version__.split(".")[:2]]
- assert torchvision_ver >= [0, 11], "Requires torchvision >= 0.11"
-
- setup(
-     name="dp2",
-     version="0.1.0",
-     packages=find_packages(),
-     install_requires=[
-         "numpy>=1.20",
-         "cython",
-         "matplotlib",
-         "tqdm",
-         "tensorboard",
-         "opencv-python",
-         "detectron2-densepose@git+https://github.com/facebookresearch/detectron2@96c752ce821a3340e27edd51c28a00665dd32a30#subdirectory=projects/DensePose",
-         "torch_fidelity==0.3.0",
-         "ninja==1.10.2",
-         "moviepy",
-         "pyspng",
-         "face_detection@git+https://github.com/hukkelas/DSFD-Pytorch-Inference",
-         "wandb",
-         "termcolor",
-         "tops@git+https://github.com/hukkelas/torch_ops.git",
-         "motpy@git+https://github.com/wmuron/motpy@c77f85d27e371c0a298e9a88ca99292d9b9cbe6b",
-         "fast_pytorch_kmeans",
-         "einops",
-         "einops_exts",
-         "regex",
-         "setuptools==59.5.0",
-         "resize_right==0.0.2",
-         "pillow==8.3.1",
-         "scipy==1.7.1",
-         "webdataset==0.2.26",
-         "scikit-image",
-         "imageio==2.4.1",
-         "timm==0.6.7",
-         "clip@git+https://github.com/openai/CLIP.git@b46f5ac7587d2e1862f8b7b1573179d80dcdd620",
-
-     ],
- )

deep_privacy/stylemc.py DELETED
@@ -1,180 +0,0 @@
- """
- Approach: "StyleMC: Multi-Channel Based Fast Text-Guided Image Generation and Manipulation"
- Original source code:
- https://github.com/autonomousvision/stylegan_xl/blob/f9be58e98110bd946fcdadef2aac8345466faaf3/run_stylemc.py#
- Modified by Håkon Hukkelås
- """
- import click
- from pathlib import Path
- import tqdm
- from dp2 import utils
- import tops
- from timeit import default_timer as timer
- import torch
- import torch.nn.functional as F
- from torchvision.transforms.functional import resize, normalize
- import clip
- from dp2.gan_trainer import AverageMeter
- from tops.config import instantiate
- from dp2.utils import vis_utils
-
-
- def spherical_dist_loss(x, y):
-     x = F.normalize(x, dim=-1)
-     y = F.normalize(y, dim=-1)
-     return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
-
-
- def prompts_dist_loss(x, targets):
-     loss = spherical_dist_loss
-     if len(targets) == 1:
-         return loss(x, targets[0])
-     distances = [loss(x, target) for target in targets]
-     return torch.stack(distances, dim=-1).sum(dim=-1)
-
- affine_modules = None
- max_ch = None
-
- @torch.no_grad()
- def init_affine_modules(G, batch):
-     global affine_modules, max_ch
-     affine_modules = []
-     max_ch = 0
-     def forward_hook(block, input_, output_):
-         global max_ch
-         affine_modules.append(block)
-         max_ch = max(max_ch, block.affine.out_features*(1+hasattr(block, "affine_beta")))
-     removable_handles = []
-     for block in G.modules():
-         if hasattr(block, "affine") and hasattr(block.affine, "weight"):
-             removable_handles.append(block.register_forward_hook(forward_hook))
-     G(**batch)
-     for hook in removable_handles:
-         hook.remove()
-
- @torch.no_grad()
- def get_styles(seed, G: torch.nn.Module, batch, truncation_value=1):
-     global affine_modules, max_ch
-     if affine_modules is None:
-         init_affine_modules(G, batch)
-     w = G.style_net.get_truncated(truncation_value, **batch, seed=seed)
-
-     all_styles = torch.zeros((len(affine_modules), max_ch), device=batch["img"].device, dtype=torch.float32)
-     for i, block in enumerate(affine_modules):
-         gamma0 = block.affine(w)
-         if hasattr(block, "affine_beta"):
-             beta0 = block.affine_beta(w)
-             gamma0 = torch.cat((gamma0, beta0), dim=1)
-         all_styles[i] = F.pad(gamma0, ((0, max_ch - gamma0.shape[-1])), "constant", 0)
-
-     return all_styles
-
-
- def get_and_cache_direction(output_dir: Path, dl_val, G, text_prompt):
-     cache_path = output_dir.joinpath(
-         "stylemc_cache", text_prompt.replace(" ", "_") + ".torch")
-     if cache_path.is_file():
-         print("Loaded cache from:", cache_path)
-         return torch.load(cache_path)
-     direction = find_direction(G, text_prompt, dl_val=iter(dl_val))
-     cache_path.parent.mkdir(exist_ok=True, parents=True)
-     torch.save(direction, cache_path)
-     return direction
-
-
- @torch.cuda.amp.autocast()
- def find_direction(
-         G,
-         text_prompt,
-         n_iterations=128*8*10,
-         batch_size=8,
-         dl_val=None
- ):
-     time_start = timer()
-     clip_model = clip.load("ViT-B/16", device=tops.get_device())[0]
-     target = [clip_model.encode_text(clip.tokenize(text_prompt).to(tops.get_device())).float()]
-     first_batch = next(dl_val)
-     first_batch["embedding"] = None if "embedding" not in first_batch else first_batch["embedding"]
-     s = get_styles(0, G, first_batch, 0)
-     # stats tracker
-     tracker = AverageMeter()
-     n_iterations = n_iterations // batch_size
-
-     # initialize the styles direction
-     direction = torch.zeros(s.shape, device=tops.get_device())
-     direction.requires_grad_()
-     utils.set_requires_grad(G, False)
-     direction_tracker = torch.zeros_like(direction)
-     opt = torch.optim.AdamW([direction], lr=0.05, betas=(0., 0.999), weight_decay=0.25)
-
-     grads = []
-     for seed_idx in tqdm.trange(n_iterations):
-         # forward pass through the synthesis network with the shifted styles
-         if seed_idx == 0:
-             batch = first_batch
-         else:
-             batch = next(dl_val)
-             batch["embedding"] = None if "embedding" not in batch else batch["embedding"]
-         styles = get_styles(seed_idx, G, batch) + direction
-         img = G(**batch, s=iter(styles))["img"]
-
-         # clip loss
-         img = (img + 1)/2
-         img = normalize(img, mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
-         img = resize(img, (224, 224))
-         embeds = clip_model.encode_image(img)
-         cos_sim = prompts_dist_loss(embeds, target)
-         cos_sim.backward(retain_graph=True)
-         # track stats
-         tracker.update(dict(cos_sim=cos_sim, norm=torch.norm(direction)))
-         if not (seed_idx % batch_size):
-             opt.step()
-             grads.append(direction.grad.clone())
-             direction.grad.data.zero_()
-             print(tracker.get_average())
-             tracker = AverageMeter()
-
-     # throw out fluctuating channels
-     direction = direction.detach()
-     direction[direction_tracker > n_iterations / 4] = 0
-     print(direction)
-     print(f"Time for direction search: {timer() - time_start:.2f} s")
-     return direction
-
-
- @click.command()
- @click.argument("config_path")
- @click.argument("text_prompt")
- @click.option("-n", default=50, type=int)
- def main(config_path: str, text_prompt: str, n: int):
-     from dp2.infer import build_trained_generator
-     from PIL import Image
-     cfg = utils.load_config(config_path)
-     G = build_trained_generator(cfg)
-     cfg.train.batch_size = 1
-     dl_val = instantiate(cfg.data.val.loader)
-     direction = get_and_cache_direction(cfg.output_dir, dl_val, G, text_prompt)
-     output_dir = Path("stylemc_results")
-     output_dir.mkdir(exist_ok=True, parents=True)
-     save = lambda x, path: Image.fromarray(utils.im2numpy(x, True, True)[0]).save(path)
-     strengths = [0, 0.05, 0.1, 0.2, 0.3, 0.4, 1.0]
-     for i, batch in enumerate(iter(dl_val)):
-         imgs = []
-
-         img = vis_utils.visualize_batch(**batch)
-         img = tops.im2numpy(img, False)[0]
-         imgs.append(img)
-         if i > n:
-             break
-         for strength in strengths:
-             styles = get_styles(i, G, batch, truncation_value=0) + direction*strength
-             img = G(**batch, s=iter(styles))["img"]
-             imgs.append(utils.im2numpy(img, True, True)[0])
-
-         img = tops.np_make_image_grid(imgs, nrow=1)
-         Image.fromarray(img).save(output_dir.joinpath(f"results_{i}.png"))
-
-
- if __name__ == "__main__":
-     main()
-
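The heart of stylemc.py is a CLIP-guided search for one global style-space direction: shift every style vector by a shared offset, render, and minimize a spherical distance between the CLIP image embedding and the text embedding. A minimal sketch of that step; `generate`, `encode_image`, `text_embed`, and `opt` are hypothetical stand-ins, not the dp2/CLIP objects used above:

import torch
import torch.nn.functional as F

def spherical_dist(x, y):
    # Geodesic-style distance between L2-normalized embedding batches.
    x, y = F.normalize(x, dim=-1), F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)

def direction_step(direction, styles, text_embed, generate, encode_image, opt):
    # Shift every style vector by the shared direction, render, and pull
    # the CLIP image embedding toward the text embedding.
    img_embed = encode_image(generate(styles + direction))
    loss = spherical_dist(img_embed, text_embed).mean()
    opt.zero_grad()
    loss.backward()
    opt.step()
    return float(loss)
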
deep_privacy/tools/__init__.py DELETED
File without changes
deep_privacy/tools/compute_cluster_means.py DELETED
@@ -1,47 +0,0 @@
- # Compute k-means clusters for W (Self-Distilled StyleGAN: Towards Generation from Internet Photos)
- # pip install fast-pytorch-kmeans
- import click
- import tqdm
- import torch
- from dp2.utils import load_config
- from dp2.infer import build_trained_generator
- import tops
- from tops.checkpointer.checkpointer import get_ckpt_paths, load_checkpoint
- from fast_pytorch_kmeans import KMeans
-
-
- @click.command()
- @click.argument("config_path")
- @click.option("-n", "--n_samples", default=int(600e3), type=int)
- @click.option("--n_centers", "--nc", default=512, type=int)
- @click.option("--batch_size", default=512, type=int)
- def compute_cluster_means(config_path, n_samples, n_centers, batch_size):
-     cfg = load_config(config_path)
-     G = build_trained_generator(cfg, map_location=torch.device("cpu"))
-     n_batches = n_samples // batch_size
-     n_samples = n_samples // batch_size * batch_size
-     print("Computing clusters over", n_samples, "samples.")
-     style_net = G.stylenet if hasattr(G, "stylenet") else G.style_net
-     style_net = tops.to_cuda(style_net)
-     w_dim = style_net.w_dim
-     z_dim = style_net.z_dim
-     with torch.inference_mode():
-         w = torch.zeros((n_samples, w_dim), device=tops.get_device(), dtype=torch.float32)
-
-         for i in tqdm.trange(n_batches):
-             w[i*batch_size:(i+1)*batch_size] = style_net(torch.randn((batch_size, z_dim), device=tops.get_device())).cpu()
-         kmeans = KMeans(n_clusters=n_centers, mode='euclidean', verbose=10, max_iter=1000, tol=0.00001)
-
-         kmeans.fit_predict(w)
-         centers = kmeans.centroids
-
-     if hasattr(style_net, "w_centers"):
-         del style_net.w_centers
-     style_net.register_buffer("w_centers", centers)
-     ckpt_path = get_ckpt_paths(cfg.checkpoint_dir)[-1]
-     ckpt = load_checkpoint(ckpt_path, map_location="cpu")
-     ckpt["EMA_generator"] = G.state_dict()
-     torch.save(ckpt, ckpt_path)
-
- compute_cluster_means()
-
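The `w_centers` buffer this tool writes is what multi-modal truncation consumes at sampling time. A self-contained sketch of the same clustering, with `mapping` as a hypothetical stand-in for the generator's style network:

import torch
from fast_pytorch_kmeans import KMeans

def cluster_w(mapping, z_dim=512, n_samples=10_000, n_centers=512):
    with torch.inference_mode():
        w = mapping(torch.randn(n_samples, z_dim))  # sample W from random z
    kmeans = KMeans(n_clusters=n_centers, mode="euclidean")
    kmeans.fit_predict(w)        # runs the clustering; assignments unused here
    return kmeans.centroids      # (n_centers, w_dim) cluster means
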
deep_privacy/tools/dryrun.py DELETED
@@ -1,49 +0,0 @@
- import click
- import torch
- import tops
- from tops.config import instantiate
- from dp2 import utils
-
- @click.command()
- @click.argument("config_path")
- def run(config_path):
-     cfg = utils.load_config(config_path)
-     utils.print_config(cfg)
-
-     G = tops.to_cuda(instantiate(cfg.generator))
-
-     D = tops.to_cuda(instantiate(cfg.discriminator))
-     cfg.train.batch_size = 2
-     print(G)
-     dl_val = instantiate(cfg.data.val.loader)
-     cfg.train.amp.scaler_D.init_scale = 1
-     scaler = instantiate(cfg.train.amp.scaler_D)
-     loss_fnc = instantiate(cfg.loss_fnc, D=D, G=G)
-     batch = next(iter(dl_val))
-     tops.print_module_summary(G, batch, max_nesting=10)
-     # tops.print_module_summary(D, batch, max_nesting=10)
-
-     print("G PARAMS:", tops.num_parameters(G) / 10 ** 6)
-     print("D PARAMS:", tops.num_parameters(D) / 10 ** 6)
-     print(f"Number of trainable parameters in D: {sum(p.numel() for p in D.parameters() if p.requires_grad)/10**6}M")
-     print(f"Number of trainable parameters in G: {sum(p.numel() for p in G.parameters() if p.requires_grad)/10**6}M")
-
-     with torch.cuda.amp.autocast(True):
-         o_G = G(**batch)
-         o_D = D(**batch)
-         print("FORWARD OK")
-         D_loss, to_log = loss_fnc.D_loss(batch, grad_scaler=scaler)
-         D_loss.backward()
-         assert all([p.grad is not None or not p.requires_grad for p in D.parameters()])
-         print(to_log)
-
-         G_loss, _ = loss_fnc.G_loss(batch, grad_scaler=scaler)
-         G_loss.backward()
-         G: torch.nn.Module = G
-         for name, p in G.named_parameters():
-             if p.grad is None and p.requires_grad:
-                 print(name)
-         assert all([p.grad is not None or not p.requires_grad for p in G.parameters()])
-
- if __name__ == "__main__":
-     run()
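The most reusable piece of dryrun.py is the gradient-coverage assertion, which catches parameters silently detached from the loss. The same check in generic form, for any `model` and scalar `loss`:

import torch

def assert_grad_coverage(model: torch.nn.Module, loss: torch.Tensor):
    # Backpropagate once, then flag every trainable parameter that
    # received no gradient at all.
    loss.backward()
    missing = [name for name, p in model.named_parameters()
               if p.requires_grad and p.grad is None]
    assert not missing, f"No gradient reached: {missing}"
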
deep_privacy/tools/inspect_dataset.py DELETED
@@ -1,52 +0,0 @@
- import cv2
- import torch
- import click
- import tops
- from tops.config import LazyConfig, instantiate
- from dp2 import utils
- from dp2.utils import vis_utils
- import numpy as np
- from PIL import Image
-
- def get_image(batch, cfg, fscale_vis):
-     im0 = batch["condition"]
-     im1 = batch["img"]
-     im = utils.denormalize_img(torch.cat((im0, im1), dim=-1)).mul(255).byte()
-     im = torch.cat((im, vis_utils.visualize_batch(**batch)), dim=-1)
-
-     im = utils.im2numpy(im)
-
-     im = tops.np_make_image_grid(im, nrow=len(im0))
-     if fscale_vis != 1:
-         new_shape = [int(_*fscale_vis) for _ in im.shape[:2][::-1]]
-         im = np.array(Image.fromarray(im).resize(new_shape))
-     return im
-
-
- @click.command()
- @click.argument("config_path")
- @click.option("--train", default=False, is_flag=True)
- @click.option("-n", "--num_images", default=8, type=int)
- @click.option("-f", "--fscale_vis", default=1)
- def main(config_path: str, train: bool, num_images: int, fscale_vis):
-     cfg = LazyConfig.load(config_path)
-     if train:
-         dl_cfg = cfg.data.train.loader
-     else:
-         dl_cfg = cfg.data.val.loader
-     dl_cfg.batch_size = num_images
-     dl = instantiate(dl_cfg)
-     print(dl.image_gpu_transform)
-     dl = iter(dl)
-
-     while True:
-         batch = next(dl)
-         im = get_image(batch, cfg, fscale_vis)
-         cv2.imshow("", im[:, :, ::-1])
-         key = cv2.waitKey(0)
-         if key == ord("q"):
-             exit()
-
-
- if __name__ == "__main__":
-     main()
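OpenCV expects BGR channel order, which is why the script flips channels with `im[:, :, ::-1]` before `cv2.imshow`. The preview loop in isolation, with `next_image` as a hypothetical callable returning an HxWx3 uint8 RGB array:

import cv2

def preview_loop(next_image):
    while True:
        im = next_image()
        cv2.imshow("preview", im[:, :, ::-1])  # RGB -> BGR for OpenCV
        if cv2.waitKey(0) == ord("q"):         # block until a key; quit on 'q'
            break
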
deep_privacy/tools/show_examples.py DELETED
@@ -1,87 +0,0 @@
- import cv2
- import torch
- import numpy as np
- import click
- import tops
- import tqdm
- from tops.config import instantiate
- from PIL import Image
- from dp2 import utils, infer
- from dp2.utils import vis_utils
- from torchvision.transforms.functional import resize
-
-
- @torch.no_grad()
- @torch.cuda.amp.autocast()
- def get_im(dl, G, num_images, num_z, fscale_vis, truncation_value: float, b_idx, multi_modal_truncation, show_lowres: bool):
-     ims = []
-     G.update_w()
-     for im_idx in tqdm.trange(num_images, desc="Sampling images"):
-         batch = next(dl)
-         ims.append(utils.im2numpy(batch["img"], True, True)[0])
-         ims.append(utils.im2numpy(batch["condition"], True, True)[0])
-         ims.append(utils.im2numpy(vis_utils.visualize_batch(**batch))[0])
-         for z_idx in range(num_z):
-             # Sample the same z across different images by fixing the seed
-             tops.set_seed(b_idx*num_z + z_idx)
-             if multi_modal_truncation and z_idx > 0:
-                 fake = G.multi_modal_truncate(**batch, truncation_value=0, w_indices=[z_idx-1])
-             else:
-                 fake = G.sample(**batch, truncation_value=truncation_value)
-             if "x_lowres" in fake and show_lowres:
-                 for x in fake["x_lowres"]:
-                     x = resize(x, fake["img"].shape[-2:])
-                     ims.append(utils.im2numpy(x, to_uint8=True, denormalize=True)[0])
-             ims.append(utils.im2numpy(fake["img"], to_uint8=True, denormalize=True)[0])
-     if fscale_vis != 1:
-         new_shape = [int(_*fscale_vis) for _ in ims[0].shape[:2][::-1]]
-         ims = [np.array(Image.fromarray(im).resize(new_shape)) for im in ims]
-     im = tops.np_make_image_grid(ims, nrow=num_images)
-     return im
-
-
- @click.command()
- @click.argument("config_path")
- @click.option("-n", "--num_images", default=8)
- @click.option("--num_z", "--nz", default=8)
- @click.option("-f", "--fscale_vis", default=1, type=float, help="Scale the output image resolution")
- @click.option("-t", "--truncation_value", default=None, type=float)
- @click.option("-l", "--show-lowres", default=False, is_flag=True)
- @click.option("--save", default=False, is_flag=True)
- @click.option("--train", default=False, is_flag=True)
- @click.option("--multi-modal-truncation", "--mt", default=False, is_flag=True)
- def show_samples(
-         config_path: str,
-         save: bool,
-         train: bool,
-         **kwargs):
-     tops.set_seed(1)
-     cfg = utils.load_config(config_path)
-     G = infer.build_trained_generator(cfg)
-     cfg.train.batch_size = 1
-     if train:
-         dl_val = cfg.data.train.loader
-     else:
-         dl_val = cfg.data.val.loader
-     dl_val.num_workers = 1
-     dl_val.shuffle = False
-     dl_val.infinite = False
-     tops.set_seed(1)
-     dl_val = iter(instantiate(dl_val))
-     b_idx = 0
-     im = get_im(dl_val, G, b_idx=b_idx, **kwargs)
-     print("Press 'a' for next image, 'q' to quit.")
-     while True:
-         b_idx += 1
-         cv2.imshow("image", im[:, :, ::-1])
-         if save:
-             cv2.imwrite("test.png", im[:, :, ::-1])
-             print("Saved file to test.png")
-         key = cv2.waitKey(0)
-         if key == ord("q"):
-             break
-         if key == ord("a"):
-             im = get_im(dl_val, G, b_idx=b_idx, **kwargs)
-
- if __name__ == "__main__":
-     show_samples()
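`G.multi_modal_truncate` follows Self-Distilled StyleGAN: rather than interpolating w toward one global mean, it interpolates toward the nearest stored cluster center (the `w_centers` computed by compute_cluster_means.py). A sketch of that operation under those assumptions, not necessarily the project's exact implementation:

import torch

def multi_modal_truncate(w: torch.Tensor, w_centers: torch.Tensor, truncation_value: float):
    # Pick the closest cluster center for each latent and interpolate toward it;
    # truncation_value=0 returns the center itself.
    nearest = w_centers[torch.cdist(w, w_centers).argmin(dim=1)]
    return nearest + truncation_value * (w - nearest)
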
deep_privacy/train.py DELETED
@@ -1,190 +0,0 @@
- import tempfile
- import click
- import tops
- import warnings
- import traceback
- import torch
- import os
- from tops import checkpointer
- from sg3_torch_utils.ops import conv2d_gradfix, grid_sample_gradfix, bias_act, upfirdn2d
- from tops.config import instantiate
- from tops import logger
- from dp2 import utils, infer
- from dp2.gan_trainer import GANTrainer
-
-
- torch.backends.cudnn.benchmark = True
-
-
- def start_train(rank, world_size, debug, cfg_path, temp_dir, benchmark: bool):
-     print(rank, world_size)
-     cfg = utils.load_config(cfg_path)
-     if debug:
-         torch.backends.cudnn.benchmark = False
-         torch.backends.cudnn.deterministic = True
-         torch.set_printoptions(precision=10)
-     else:
-         torch.backends.cuda.matmul.allow_tf32 = True
-         torch.backends.cudnn.allow_tf32 = True
-         conv2d_gradfix.enabled = cfg.train.conv2d_gradfix_enabled
-         grid_sample_gradfix.enabled = cfg.train.grid_sample_gradfix_enabled
-         upfirdn2d.enabled = cfg.train.grid_sample_gradfix_enabled
-         bias_act.enabled = cfg.train.bias_act_plugin_enabled
-     if world_size > 1:
-         init_file = os.path.abspath(os.path.join(temp_dir, ".torch_distributed_init"))
-         init_method = f"file://{init_file}"
-         torch.distributed.init_process_group(
-             "nccl", rank=rank, world_size=world_size, init_method=init_method
-         )
-         # pin_memory in the dataloader would allocate memory on device:0 for distributed training.
-         torch.cuda.set_device(tops.get_device())
-
-     tops.set_AMP(cfg.train.amp.enabled)
-     utils.init_tops(cfg)
-     if tops.rank() == 0:
-         utils.print_config(cfg)
-         with open(cfg.output_dir.joinpath("config_path.py"), "w") as fp:
-             fp.write(utils.config_to_str(cfg))
-
-     if world_size > 1:
-         assert cfg.train.batch_size > tops.world_size()
-         assert cfg.train.batch_size % tops.world_size() == 0
-         cfg.train.batch_size //= world_size
-         if rank != 0:
-             warnings.filterwarnings("ignore", category=DeprecationWarning)
-             warnings.filterwarnings("ignore", category=UserWarning)
-     tops.set_seed(cfg.train.seed + rank)
-     logger.log("Loading dataset.")
-     dl_val = instantiate(cfg.data.val.loader, channels_last=cfg.train.channels_last)
-     dl_train = instantiate(cfg.data.train.loader, channels_last=cfg.train.channels_last)
-     dl_train = iter(dl_train)
-
-     logger.log("Initializing models.")
-     G = instantiate(cfg.generator)
-     D = tops.to_cuda(instantiate(cfg.discriminator))
-     if tops.rank() == 0:
-         print(G)
-         print(D)
-
-     # TODO: EMA might need to be synced across GPUs before instantiate
-     G_EMA = utils.EMA(G, cfg.train.batch_size * world_size, **cfg.EMA)
-     G = tops.to_cuda(G)
-     if world_size > 1:
-         logger.log("Syncing models across GPUs")
-         # Distributed sync is implemented manually. Buffers are never broadcast during training.
-         for module in [G_EMA, G, D]:
-             params_and_buffers = list(module.named_parameters())
-             params_and_buffers += list(module.named_buffers())
-             for name, param in params_and_buffers:
-                 torch.distributed.broadcast(param, src=0)
-     if cfg.train.compile_D.enabled:
-         compile_kwargs = instantiate(cfg.train.compile_D)
-         compile_kwargs.pop("enabled")
-         D = torch.compile(D, **compile_kwargs)
-     if cfg.train.compile_G.enabled:
-         compile_kwargs = instantiate(cfg.train.compile_G)
-         compile_kwargs.pop("enabled")
-         G = torch.compile(G, **compile_kwargs)
-     logger.log("Initializing optimizers")
-     grad_scaler_D = instantiate(cfg.train.amp.scaler_D)
-     grad_scaler_G = instantiate(cfg.train.amp.scaler_G)
-
-     G_optim = instantiate(cfg.G_optim, params=G.parameters())
-     D_optim = instantiate(cfg.D_optim, params=D.parameters())
-
-     loss_fnc = instantiate(cfg.loss_fnc, D=D, G=G)
-     logger.add_scalar("stats/gpu_batch_size", cfg.train.batch_size)
-     logger.add_scalar("stats/ngpus", world_size)
-
-     D.train()
-     G.train()
-     if hasattr(cfg.train, "discriminator_init_cfg") and not benchmark:
-         cfg_ = utils.load_config(cfg.train.discriminator_init_cfg)
-         ckpt = checkpointer.load_checkpoint(cfg_.checkpoint_dir)["discriminator"]
-         if hasattr(cfg_, "ckpt_mapper_D"):
-             ckpt = instantiate(cfg_.ckpt_mapper_D)(ckpt)
-         D.load_state_dict(ckpt)
-     if hasattr(cfg.train, "generator_init_cfg") and not benchmark:
-         cfg_ = utils.load_config(cfg.train.generator_init_cfg)
-         ckpt = checkpointer.load_checkpoint(cfg_.checkpoint_dir)["EMA_generator"]
-         if hasattr(cfg_, "ckpt_mapper"):
-             ckpt = instantiate(cfg_.ckpt_mapper)(ckpt)
-         infer.load_state_dict(G, ckpt)
-         infer.load_state_dict(G_EMA.generator, ckpt)
-
-     G_EMA.eval()
-     if cfg.train.channels_last:
-         G = G.to(memory_format=torch.channels_last)
-         D = D.to(memory_format=torch.channels_last)
-
-     if tops.world_size() > 1:
-         torch.distributed.barrier()
-
-     trainer = GANTrainer(
-         G=G,
-         D=D,
-         G_EMA=G_EMA,
-         D_optim=D_optim,
-         G_optim=G_optim,
-         dl_train=dl_train,
-         dl_val=dl_val,
-         scaler_D=grad_scaler_D,
-         scaler_G=grad_scaler_G,
-         ims_per_log=cfg.train.ims_per_log,
-         max_images_to_train=cfg.train.max_images_to_train,
-         ims_per_val=cfg.train.ims_per_val,
-         loss_handler=loss_fnc,
-         evaluate_fn=instantiate(cfg.data.train_evaluation_fn),
-         batch_size=cfg.train.batch_size,
-         broadcast_buffers=cfg.train.broadcast_buffers,
-         fp16_ddp_accumulate=cfg.train.fp16_ddp_accumulate,
-         save_state=not benchmark
-     )
-     if benchmark:
-         trainer.estimate_ims_per_hour()
-         if world_size > 1:
-             torch.distributed.barrier()
-         logger.finish()
-         if world_size > 1:
-             torch.distributed.destroy_process_group()
-         return
-
-     try:
-         trainer.train_loop()
-     except Exception:
-         traceback.print_exc()
-         exit()
-     tops.set_AMP(False)
-     tops.set_seed(0)
-     metrics = instantiate(cfg.data.evaluation_fn)(generator=G_EMA, dataloader=dl_val)
-     metrics = {f"metrics_final/{k}": v for k, v in metrics.items()}
-     logger.add_dict(metrics, level=logger.logger.INFO)
-     if world_size > 1:
-         torch.distributed.barrier()
-     logger.finish()
-
-     if world_size > 1:
-         torch.distributed.destroy_process_group()
-
-
- @click.command()
- @click.argument("config_path")
- @click.option("--debug", default=False, is_flag=True)
- @click.option("--benchmark", default=False, is_flag=True)
- def main(config_path: str, debug: bool, benchmark: bool):
-     # Manually overriding world_size does not work; set the CUDA_VISIBLE_DEVICES environment variable instead.
-     world_size = torch.cuda.device_count()
-     if world_size > 1:
-         torch.multiprocessing.set_start_method("spawn", force=True)
-         with tempfile.TemporaryDirectory() as temp_dir:
-             torch.multiprocessing.spawn(
-                 start_train,
-                 args=(world_size, debug, config_path, temp_dir, benchmark),
-                 nprocs=torch.cuda.device_count(),
-             )
-     else:
-         start_train(0, 1, debug, config_path, None, benchmark)
-
- if __name__ == "__main__":
-     main()
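train.py bootstraps multi-GPU training with one spawned process per GPU and a file:// rendezvous in a temporary directory. The bare pattern, detached from the trainer (a sketch; the body comment marks where model setup would go):

import os
import tempfile
import torch

def worker(rank, world_size, temp_dir):
    # Each spawned process joins the group via a shared file in temp_dir.
    init_file = os.path.abspath(os.path.join(temp_dir, ".torch_distributed_init"))
    torch.distributed.init_process_group(
        "nccl", rank=rank, world_size=world_size, init_method=f"file://{init_file}")
    torch.cuda.set_device(rank)
    # ... build models, broadcast parameters from rank 0, run the training loop ...
    torch.distributed.destroy_process_group()

if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    with tempfile.TemporaryDirectory() as temp_dir:
        # spawn passes the process rank as the first argument to worker.
        torch.multiprocessing.spawn(worker, args=(world_size, temp_dir), nprocs=world_size)
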
deep_privacy/validate.py DELETED
@@ -1,64 +0,0 @@
- import click
- import torch
- import os
- import tempfile
- from dp2.infer import build_trained_generator
- from tops.config import instantiate
- from dp2.utils import load_config
- import tops
- from tops import logger
-
-
- def validate(
-         rank,
-         config_path,
-         batch_size: int,
-         truncation_value: float,
-         world_size,
-         temp_dir,
- ):
-     tops.set_seed(0)
-     tops.set_AMP(False)
-     if world_size > 1:
-         init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
-         init_method = f'file://{init_file}'
-         torch.distributed.init_process_group(
-             "nccl", rank=rank, world_size=world_size, init_method=init_method)
-         torch.cuda.set_device(tops.get_device())  # pin_memory in the dataloader would allocate memory on device:0 for distributed training.
-     cfg = load_config(config_path)
-
-     if batch_size is not None:
-         assert batch_size % world_size == 0
-         cfg.train.batch_size = batch_size // world_size
-     dl_val = instantiate(cfg.data.val.loader)
-     G = build_trained_generator(cfg)
-     tops.set_seed(0)
-     tops.set_AMP(False)
-     metrics = instantiate(cfg.data.evaluation_fn)(generator=G, dataloader=dl_val, truncation_value=truncation_value)
-     metrics = {f"metrics_final/{k}": v for k, v in metrics.items()}
-     if rank == 0:
-         tops.init(cfg.output_dir)
-         logger.add_dict(metrics)
-         logger.finish()
-
-
- @click.command()
- @click.argument("config_path")
- @click.option("--batch_size", default=16, type=int)
- @click.option("--truncation-value", default=None, type=float)
- def main(config_path, batch_size: int, truncation_value: float):
-     world_size = torch.cuda.device_count()
-     if world_size > 1:
-         torch.multiprocessing.set_start_method("spawn", force=True)
-         with tempfile.TemporaryDirectory() as temp_dir:
-             torch.multiprocessing.spawn(validate,
-                 args=(config_path, batch_size, truncation_value, world_size, temp_dir),
-                 nprocs=world_size)
-     else:
-         validate(
-             0, config_path, batch_size, truncation_value,
-             world_size=1, temp_dir=None)
-
-
- if __name__ == "__main__":
-     main()
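Validation splits the requested global batch size evenly across ranks, which the assert above guards. The same rule in isolation:

def per_rank_batch_size(global_batch_size: int, world_size: int) -> int:
    # Each rank evaluates an equal share of the global batch.
    assert global_batch_size % world_size == 0, "batch size must divide evenly across GPUs"
    return global_batch_size // world_size

assert per_rank_batch_size(16, 4) == 4
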
{deep_privacy/dp2 → dp2}/__init__.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/anonymizer/__init__.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/anonymizer/anonymizer.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/anonymizer/histogram_match_anonymizers.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/__init__.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/build.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/datasets/__init__.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/datasets/coco_cse.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/datasets/fdf.py RENAMED
File without changes
{deep_privacy/dp2 → dp2}/data/datasets/fdf128_wds.py RENAMED
File without changes