Commit d849382 (parent: 1a990e7)
xiaoyao9184 committed: Synced repo using 'sync_with_huggingface' Github Action

Files changed:
- app.py (+44 −44)
- gradio_app.py (+393 −393)
app.py
CHANGED
@@ -1,44 +1,44 @@ (every line is marked replaced, but the old and new text are identical, so the file is shown once)

```python
import os
import sys
import git
import subprocess
from huggingface_hub import hf_hub_download

REPO_URL = "https://github.com/facebookresearch/watermark-anything.git"
REPO_BRANCH = '45d56c2b61f2bc73caeafc90e14df33ad50b238c'
LOCAL_PATH = "./watermark-anything"
MODEL_ID = "facebook/watermark-anything"

def install_src():
    if not os.path.exists(LOCAL_PATH):
        print(f"Cloning repository from {REPO_URL}...")
        repo = git.Repo.clone_from(REPO_URL, LOCAL_PATH)
        repo.git.checkout(REPO_BRANCH)
    else:
        print(f"Repository already exists at {LOCAL_PATH}")

    requirements_path = os.path.join(LOCAL_PATH, "requirements.txt")
    if os.path.exists(requirements_path):
        print("Installing requirements...")
        subprocess.check_call(["pip", "install", "-r", requirements_path])
    else:
        print("No requirements.txt found.")

def install_model():
    checkpoint_path = os.path.join(LOCAL_PATH, "checkpoints")
    hf_hub_download(repo_id=MODEL_ID, filename='checkpoint.pth', local_dir=checkpoint_path)

# clone repo and download model
install_src()
install_model()

# change directory
print(f"Current Directory: {os.getcwd()}")
os.chdir(LOCAL_PATH)
print(f"New Directory: {os.getcwd()}")

# fix sys.path for import
sys.path.append(os.getcwd())

# run gradio
import gradio_app
```
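The bootstrap pins the upstream checkout to a commit hash (stored in `REPO_BRANCH`, despite the name) rather than a moving branch, so later upstream pushes cannot change what the Space runs. A minimal standalone sketch of the same pattern, assuming `gitpython` is installed:

```python
import git

REPO_URL = "https://github.com/facebookresearch/watermark-anything.git"
PINNED_COMMIT = "45d56c2b61f2bc73caeafc90e14df33ad50b238c"

# Clone, then detach HEAD at the pinned commit; the working tree now
# matches that exact revision regardless of where the branch moves.
repo = git.Repo.clone_from(REPO_URL, "./watermark-anything")
repo.git.checkout(PINNED_COMMIT)
print(repo.head.commit.hexsha)  # prints the pinned hash
```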
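`install_model` fetches the checkpoint with `hf_hub_download` into `local_dir`, so after bootstrap the weights sit at `./watermark-anything/checkpoints/checkpoint.pth`, which is where `load_wam` in gradio_app.py looks (relative to the repo root after the `os.chdir`). The download step in isolation, as a sketch:

```python
from huggingface_hub import hf_hub_download

# Downloads checkpoint.pth from facebook/watermark-anything into
# ./watermark-anything/checkpoints/ and returns the local file path.
path = hf_hub_download(
    repo_id="facebook/watermark-anything",
    filename="checkpoint.pth",
    local_dir="./watermark-anything/checkpoints",
)
print(path)
```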
gradio_app.py
CHANGED
@@ -1,393 +1,393 @@ (likewise a full-file rewrite with identical old and new text; shown once)

```python
import os
import sys
if "APP_PATH" in os.environ:
    os.chdir(os.environ["APP_PATH"])
    # fix sys.path for import
    sys.path.append(os.getcwd())

import gradio as gr

import re
import string
import random
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
from torchvision import transforms


from watermark_anything.data.metrics import msg_predict_inference
from notebooks.inference_utils import (
    load_model_from_checkpoint,
    default_transform,
    unnormalize_img,
    create_random_mask,
    plot_outputs,
    msg2str,
    torch_to_np,
    multiwm_dbscan
)

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Constants
proportion_masked = 0.5  # Proportion of image to be watermarked
epsilon = 1  # min distance between decoded messages in a cluster
min_samples = 500  # min number of pixels in a 256x256 image to form a cluster

# Color map for visualization
color_map = {
    -1: [0, 0, 0],       # Black for -1
    0: [255, 0, 255],    # Magenta for 0
    1: [255, 0, 0],      # Red for 1
    2: [0, 255, 0],      # Green for 2
    3: [0, 0, 255],      # Blue for 3
    4: [255, 255, 0],    # Yellow for 4
}

def load_wam():
    # Load the model from the specified checkpoint
    exp_dir = "checkpoints"
    json_path = os.path.join(exp_dir, "params.json")
    ckpt_path = os.path.join(exp_dir, 'checkpoint.pth')
    wam = load_model_from_checkpoint(json_path, ckpt_path).to(device).eval()
    return wam

def image_detect(img_pil: Image.Image) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):
    img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]

    # Detect the watermark in the multi-watermarked image
    preds = wam.detect(img_pt)["preds"]  # [1, 33, 256, 256]
    mask_preds = F.sigmoid(preds[:, 0, :, :])  # [1, 256, 256], predicted mask
    mask_preds_res = F.interpolate(mask_preds.unsqueeze(1), size=(img_pt.shape[-2], img_pt.shape[-1]), mode="bilinear", align_corners=False)  # [1, 1, H, W]
    bit_preds = preds[:, 1:, :, :]  # [1, 32, 256, 256], predicted bits

    # positions holds the cluster number at each pixel; it can be upscaled back to the original size.
    try:
        centroids, positions = multiwm_dbscan(bit_preds, mask_preds, epsilon=epsilon, min_samples=min_samples)
        centroids_pt = torch.stack(list(centroids.values()))
    except UnboundLocalError as e:
        print(f"Error while detecting watermark: {e}")
        positions = None
        centroids = None
        centroids_pt = None

    return img_pt, (mask_preds_res > 0.5).float(), positions, centroids, centroids_pt

def image_embed(img_pil: Image.Image, wm_msgs: torch.Tensor, wm_masks: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):
    img_pt = default_transform(img_pil).unsqueeze(0).to(device)  # [1, 3, H, W]

    # Embed the watermark messages into the image.
    # In each mask, 1 values correspond to pixels where the watermark will be placed.
    multi_wm_img = img_pt.clone()
    for ii in range(len(wm_msgs)):
        wm_msg, mask = wm_msgs[ii].unsqueeze(0), wm_masks[ii]
        outputs = wam.embed(img_pt, wm_msg)
        multi_wm_img = outputs['imgs_w'] * mask + multi_wm_img * (1 - mask)

    torch.cuda.empty_cache()
    return img_pt, multi_wm_img, wm_masks.sum(0)

def create_bounding_mask(img_size, boxes):
    """Create a binary mask from bounding boxes.

    Args:
        img_size (tuple): Image size (height, width)
        boxes (list): List of tuples (x1, y1, x2, y2) defining bounding boxes

    Returns:
        torch.Tensor: Binary mask tensor
    """
    mask = torch.zeros(img_size)
    for x1, y1, x2, y2 in boxes:
        mask[y1:y2, x1:x2] = 1
    return mask

def centroid_to_hex(centroid):
    binary_int = 0
    for bit in centroid:
        binary_int = (binary_int << 1) | int(bit.item())
    return format(binary_int, '08x')

# Load the model
wam = load_wam()

def detect_watermark(image):
    if image is None:
        return None, None, {"status": "error", "messages": [], "error": "No image provided"}

    img_pil = Image.fromarray(image).convert("RGB")
    det_img, pred, positions, centroids, centroids_pt = image_detect(img_pil)

    # Convert tensor images to numpy for display
    detected_img = torch_to_np(det_img.detach())
    pred_mask = torch_to_np(pred.detach().repeat(1, 3, 1, 1))

    # Create cluster visualization
    if positions is not None:
        resize_ori = transforms.Resize(det_img.shape[-2:])
        rgb_image = torch.zeros((3, positions.shape[-1], positions.shape[-2]), dtype=torch.uint8)
        for value, color in color_map.items():
            mask_ = positions == value
            for channel, color_value in enumerate(color):
                rgb_image[channel][mask_.squeeze()] = color_value
        rgb_image = resize_ori(rgb_image.float() / 255)
        cluster_viz = rgb_image.permute(1, 2, 0).numpy()

        # Create message output as JSON
        messages = []
        for key in centroids.keys():
            centroid_hex = centroid_to_hex(centroids[key])
            centroid_hex_array = "-".join([centroid_hex[i:i+4] for i in range(0, len(centroid_hex), 4)])
            messages.append({
                "id": int(key),
                "message": centroid_hex_array,
                "color": color_map[key]
            })
        message_json = {
            "status": "success",
            "messages": messages,
            "count": len(messages)
        }
    else:
        cluster_viz = np.zeros_like(detected_img)
        message_json = {
            "status": "no_detection",
            "messages": [],
            "count": 0
        }

    return pred_mask, cluster_viz, message_json

def embed_watermark(image, wm_num, wm_type, wm_str, wm_loc):
    if image is None:
        return None, None, {
            "status": "failure",
            "messages": "No image provided"
        }

    if wm_type == "input":
        if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (wm_num - 1), wm_str):
            tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(wm_num)])
            return None, None, {
                "status": "failure",
                "messages": f"Invalid watermark text. Please use the format {tip}"
            }

    if wm_loc == "bounding":
        if ROI_coordinates['clicks'] != wm_num * 2:
            return None, None, {
                "status": "failure",
                "messages": "Invalid location input. Please draw at least %d bounding ROIs" % (wm_num)
            }

    img_pil = Image.fromarray(image).convert("RGB")

    # Generate watermark messages based on type
    wm_msgs = []
    if wm_type == "random":
        chars = '-'.join(''.join(random.choice(string.hexdigits) for _ in range(4)) for _ in range(wm_num * 2))
        wm_str = chars.lower()
    wm_hex = wm_str.replace("-", "")
    for i in range(0, len(wm_hex), 8):
        chunk = wm_hex[i:i+8]
        binary = bin(int(chunk, 16))[2:].zfill(32)
        wm_msgs.append([int(b) for b in binary])
    # Each message is 32 bits, embedded into the image separately
    wm_msgs = torch.tensor(wm_msgs, dtype=torch.float32).to(device)

    # Create mask based on location type
    wm_masks = None
    if wm_loc == "random":
        img_pt = default_transform(img_pil).unsqueeze(0).to(device)
        # Scale the per-mask percentage so that a usable width can still be found;
        # otherwise it is easy to enter an infinite loop and fail to find one.
        mask_percentage = img_pil.height / img_pil.width * proportion_masked / wm_num
        wm_masks = create_random_mask(img_pt, num_masks=wm_num, mask_percentage=mask_percentage)
    elif wm_loc == "bounding" and sections:
        wm_masks = torch.zeros((len(sections), 1, img_pil.height, img_pil.width), dtype=torch.float32).to(device)
        for idx, ((x_start, y_start, x_end, y_end), _) in enumerate(sections):
            left = min(x_start, x_end)
            right = max(x_start, x_end)
            top = min(y_start, y_end)
            bottom = max(y_start, y_end)
            wm_masks[idx, 0, top:bottom, left:right] = 1

    img_pt, embed_img_pt, embed_mask_pt = image_embed(img_pil, wm_msgs, wm_masks)

    # Convert to numpy for display
    img_np = torch_to_np(embed_img_pt.detach())
    mask_np = torch_to_np(embed_mask_pt.detach().expand(3, -1, -1))
    message_json = {
        "status": "success",
        "messages": wm_str
    }
    return img_np, mask_np, message_json


# ROI means Region Of Interest. It is the region where the user clicks
# to specify the location of the watermark.
ROI_coordinates = {
    'x_temp': 0,
    'y_temp': 0,
    'x_new': 0,
    'y_new': 0,
    'clicks': 0,
}

sections = []

def get_select_coordinates(img, evt: gr.SelectData, num):
    if ROI_coordinates['clicks'] >= num * 2:
        gr.Warning(f"Can't add more than {num} watermarks.")
        return (img, sections)

    # update new coordinates
    ROI_coordinates['clicks'] += 1
    ROI_coordinates['x_temp'] = ROI_coordinates['x_new']
    ROI_coordinates['y_temp'] = ROI_coordinates['y_new']
    ROI_coordinates['x_new'] = evt.index[0]
    ROI_coordinates['y_new'] = evt.index[1]
    # compare start and end coordinates
    x_start = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] < ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
    y_start = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] < ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
    x_end = ROI_coordinates['x_new'] if (ROI_coordinates['x_new'] > ROI_coordinates['x_temp']) else ROI_coordinates['x_temp']
    y_end = ROI_coordinates['y_new'] if (ROI_coordinates['y_new'] > ROI_coordinates['y_temp']) else ROI_coordinates['y_temp']
    if ROI_coordinates['clicks'] % 2 == 0:
        # both start and end points are set
        sections[len(sections) - 1] = ((x_start, y_start, x_end, y_end), f"Mask {len(sections)}")
        return (img, sections)
    else:
        point_width = int(img.shape[0] * 0.05)
        sections.append(((ROI_coordinates['x_new'], ROI_coordinates['y_new'],
                          ROI_coordinates['x_new'] + point_width, ROI_coordinates['y_new'] + point_width),
                         f"Click second point for Mask {len(sections) + 1}"))
        return (img, sections)

def del_select_coordinates(img, evt: gr.SelectData):
    del sections[evt.index]
    # recreate section names
    for i in range(len(sections)):
        sections[i] = (sections[i][0], f"Mask {i + 1}")

    # the last section's second click is not yet complete
    if ROI_coordinates['clicks'] % 2 != 0:
        if len(sections) == evt.index:
            # delete the last section
            ROI_coordinates['clicks'] -= 1
        else:
            # recreate the last section's name for the second point
            ROI_coordinates['clicks'] -= 2
            sections[len(sections) - 1] = (sections[len(sections) - 1][0], f"Click second point for Mask {len(sections) + 1}")
    else:
        ROI_coordinates['clicks'] -= 2

    return (img[0], sections)

with gr.Blocks(title="Watermark Anything Demo") as demo:
    gr.Markdown("""
    # Watermark Anything Demo
    This app demonstrates watermark detection and embedding using the Watermark Anything model.
    Find the project [here](https://github.com/facebookresearch/watermark-anything).
    """)

    with gr.Tabs():
        with gr.TabItem("Embed Watermark"):
            with gr.Row():
                with gr.Column():
                    embedding_img = gr.Image(label="Input Image", type="numpy")

                with gr.Column():
                    embedding_num = gr.Slider(1, 5, value=1, step=1, label="Number of Watermarks")
                    embedding_type = gr.Radio(["random", "input"], value="random", label="Type", info="Type of watermarks")
                    embedding_str = gr.Textbox(label="Watermark Text", visible=False, show_copy_button=True)
                    embedding_loc = gr.Radio(["random", "bounding"], value="random", label="Location", info="Location of watermarks")

                    @gr.render(inputs=embedding_loc)
                    def show_split(wm_loc):
                        if wm_loc == "bounding":
                            embedding_box = gr.AnnotatedImage(
                                label="ROI",
                                color_map={
                                    "ROI of Watermark embedding": "#9987FF",
                                    "Click second point for ROI": "#f44336"}
                            )

                            embedding_img.select(
                                fn=get_select_coordinates,
                                inputs=[embedding_img, embedding_num],
                                outputs=embedding_box)
                            embedding_box.select(
                                fn=del_select_coordinates,
                                inputs=embedding_box,
                                outputs=embedding_box
                            )
                        else:
                            embedding_img.select()

                    embedding_btn = gr.Button("Embed Watermark")
            marked_msg = gr.JSON(label="Marked Messages")
            with gr.Row():
                marked_image = gr.Image(label="Watermarked Image")
                marked_mask = gr.Image(label="Position of the watermark")

            def visible_text_label(embedding_type, embedding_num):
                if embedding_type == "input":
                    tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
                    return gr.update(visible=True, label=f"Watermark Text (Format: {tip})")
                else:
                    return gr.update(visible=False)

            def check_embedding_str(embedding_str, embedding_num):
                if not re.match(r"^([0-9A-F]{4}-[0-9A-F]{4}-){%d}[0-9A-F]{4}-[0-9A-F]{4}$" % (embedding_num - 1), embedding_str):
                    tip = "-".join([f"FFFF-{_}{_}{_}{_}" for _ in range(embedding_num)])
                    gr.Warning(f"Invalid format. Please use {tip}", duration=0)
                    return gr.update(interactive=False)
                else:
                    return gr.update(interactive=True)

            embedding_num.change(
                fn=visible_text_label,
                inputs=[embedding_type, embedding_num],
                outputs=[embedding_str]
            )
            embedding_type.change(
                fn=visible_text_label,
                inputs=[embedding_type, embedding_num],
                outputs=[embedding_str]
            )
            embedding_str.change(
                fn=check_embedding_str,
                inputs=[embedding_str, embedding_num],
                outputs=[embedding_btn]
            )

            embedding_btn.click(
                fn=embed_watermark,
                inputs=[embedding_img, embedding_num, embedding_type, embedding_str, embedding_loc],
                outputs=[marked_image, marked_mask, marked_msg]
            )

        with gr.TabItem("Detect Watermark"):
            with gr.Row():
                with gr.Column():
                    detecting_img = gr.Image(label="Input Image", type="numpy")
                with gr.Column():
                    detecting_btn = gr.Button("Detect Watermark")
            predicted_messages = gr.JSON(label="Detected Messages")
            with gr.Row():
                predicted_mask = gr.Image(label="Predicted Watermark Position")
                predicted_cluster = gr.Image(label="Watermark Clusters")

            detecting_btn.click(
                fn=detect_watermark,
                inputs=[detecting_img],
                outputs=[predicted_mask, predicted_cluster, predicted_messages]
            )

demo.launch()
```
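For reference, both tabs share one message format: each watermark is 32 bits, written as two dash-separated groups of four hex digits. `embed_watermark` parses that string into bit lists, and `centroid_to_hex` plus the grouping in `detect_watermark` re-encode decoded bits. A round-trip sketch with a made-up message:

```python
import torch

# Embedding side: "FFFF-0000" -> 32 bits, as in embed_watermark.
wm_str = "FFFF-0000"
wm_hex = wm_str.replace("-", "")
bits = [int(b) for b in bin(int(wm_hex, 16))[2:].zfill(32)]
print(len(bits), bits[:8])  # 32 [1, 1, 1, 1, 1, 1, 1, 1]

# Detection side: bits -> hex, as in centroid_to_hex / detect_watermark.
centroid = torch.tensor(bits, dtype=torch.float32)
binary_int = 0
for bit in centroid:
    binary_int = (binary_int << 1) | int(bit.item())
centroid_hex = format(binary_int, "08x")
print("-".join(centroid_hex[i:i + 4] for i in range(0, 8, 4)))  # ffff-0000
```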
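The `wm_loc == "bounding"` branch converts each two-click ROI stored in `sections` into a one-channel binary mask before embedding. The same construction in isolation, with two hypothetical boxes on a 256x256 image:

```python
import torch

# Hypothetical ROIs in the same (box, label) shape used by `sections`.
sections = [((10, 20, 110, 120), "Mask 1"), ((150, 30, 200, 90), "Mask 2")]
height = width = 256

wm_masks = torch.zeros((len(sections), 1, height, width), dtype=torch.float32)
for idx, ((x_start, y_start, x_end, y_end), _) in enumerate(sections):
    left, right = min(x_start, x_end), max(x_start, x_end)
    top, bottom = min(y_start, y_end), max(y_start, y_end)
    wm_masks[idx, 0, top:bottom, left:right] = 1  # 1 = watermark goes here

print(wm_masks.sum(dim=(1, 2, 3)))  # tensor([10000., 3000.]) = box areas
```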