Spaces commit: update
Status: Build error

Files changed:
- .ipynb_checkpoints/app-checkpoint.py (+86, -0)
- app.py (+86, -0)
- checkpoints/robust_crossvit_18_dagger_408.pt (+3, -0)
- requirements.txt (+14, -0)
- samples/test.png (binary)
- src/.ipynb_checkpoints/utils-checkpoint.py (+35, -0)
- src/__pycache__/utils.cpython-38.pyc (binary)
- src/utils.py (+35, -0)
.ipynb_checkpoints/app-checkpoint.py
ADDED
Contents identical to app.py (below).
app.py
ADDED
import torch
import torch.nn as nn
from robustness.datasets import ImageNet
from robustness.attacker import AttackerModel
from timm.models import create_model
from torchvision import transforms
from robustness.tools.label_maps import CLASS_DICT
from src.utils import *
import gradio as gr
import os
from PIL import Image

# ImageNet class indices for the selectable attack targets
DICT_CLASSES = {'lake': 955,
                'castle': 483,
                'library': 624}
IMG_MAX_SIZE = 256
ARCH = 'crossvit_18_dagger_408'
ARCH_PATH = './checkpoints/robust_crossvit_18_dagger_408.pt'
CUSTOM_TRANSFORMS = transforms.Compose([transforms.Resize([IMG_MAX_SIZE, IMG_MAX_SIZE]),
                                        transforms.ToTensor()])
DEVICE = 'cpu'


def load_model(robust=True):
    # CustomArt exists mainly to hand AttackerModel the normalization stats;
    # the test image is a throwaway used to construct it.
    test_image = Image.open('samples/test.png')
    ds = CustomArt(test_image, CUSTOM_TRANSFORMS)
    model = create_model(ARCH, pretrained=True).to(DEVICE)
    if robust:
        print("Load Robust Model")
        checkpoint = torch.load(ARCH_PATH, map_location=DEVICE)
        model.load_state_dict(checkpoint['state_dict'], strict=True)
    model = RobustModel(model).to(DEVICE)
    model = AttackerModel(model, ds).to(DEVICE)
    model = model.eval()
    del test_image, ds
    return model


def gradio_fn(image_input, radio_steps, radio_class, radio_robust):
    model = load_model(radio_robust)
    kwargs = {
        'constraint': '2',  # L2-bounded attack
        'eps': 300,
        'step_size': 1,
        'iterations': int(radio_steps),
        'targeted': True,
        'do_tqdm': True,
        # 'device': DEVICE
    }
    # Build the target label and preprocess the input image
    target = torch.tensor([int(DICT_CLASSES[radio_class])]).to(DEVICE)
    image = Image.fromarray(image_input)
    image = CUSTOM_TRANSFORMS(image).to(DEVICE)
    image = torch.unsqueeze(image, dim=0)
    # AttackerModel returns (output, adversarial_image) when make_adv=True
    _, im_adv = model(image, target, make_adv=True, **kwargs)
    im_adv = im_adv.squeeze(dim=0).permute(1, 2, 0).cpu().numpy()
    return im_adv


if __name__ == '__main__':
    demo = gr.Blocks()
    with demo:
        gr.Markdown("# Art Adversarial Attack")
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    # Number of attack steps
                    radio_steps = gr.Radio([10, 500, 1000, 1500, 2000], value=500, label="# Attack Steps")
                    # Target class for the targeted attack
                    radio_class = gr.Radio(list(DICT_CLASSES.keys()),
                                           value=list(DICT_CLASSES.keys())[0],
                                           label="Target Class")
                    radio_robust = gr.Radio([True, False], value=True, label="Robust Model")
                # Input image
                with gr.Row():
                    image_input = gr.Image(label="Input Image")
                with gr.Row():
                    calculate_button = gr.Button("Compute")
            with gr.Column():
                target_image = gr.Image(label="Art Image")

        calculate_button.click(fn=gradio_fn,
                               inputs=[image_input, radio_steps, radio_class, radio_robust],
                               outputs=target_image)
    demo.launch(share=True, debug=True)
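For reference, a minimal sketch of driving the same attack from a plain script, without the Gradio UI. It is hypothetical (not part of the commit) and assumes app.py is importable from the repo root; the __main__ guard keeps the demo from launching on import, and the attack call mirrors gradio_fn above.

# Hypothetical driver script: runs the targeted L2 attack exactly as
# gradio_fn does, but from the command line.
import torch
from PIL import Image
from app import load_model, CUSTOM_TRANSFORMS, DICT_CLASSES, DEVICE

model = load_model(robust=True)
image = CUSTOM_TRANSFORMS(Image.open('samples/test.png').convert('RGB')).unsqueeze(0).to(DEVICE)
target = torch.tensor([DICT_CLASSES['castle']]).to(DEVICE)

attack_kwargs = {'constraint': '2', 'eps': 300, 'step_size': 1,
                 'iterations': 500, 'targeted': True, 'do_tqdm': True}
# AttackerModel returns (output_on_clean_input, adversarial_image)
_, im_adv = model(image, target, make_adv=True, **attack_kwargs)
print(im_adv.shape)  # torch.Size([1, 3, 256, 256])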
checkpoints/robust_crossvit_18_dagger_408.pt
ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:1ea8cbd2047ad6fa0eddd23cf9cd026ca63b114001e0d7063ab23e2973a49cd0
size 535639640
requirements.txt
ADDED
# Install robustness
pandas
numpy
scipy
GPUtil
dill
tensorboardX
tables
scikit-learn
seaborn
cox
matplotlib
networkx
git+https://github.com/williamberrios/robustness
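Note that app.py also imports torch, timm, and gradio, none of which are pinned here; the build presumably relies on the Spaces base image to provide them, and a package missing at import time would produce the kind of build error shown in the status above.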
samples/test.png
ADDED
src/.ipynb_checkpoints/utils-checkpoint.py
ADDED
Contents identical to src/utils.py (below).
src/__pycache__/utils.cpython-38.pyc
ADDED
Binary file (1.81 kB)
src/utils.py
ADDED
from PIL import Image
import torch
import torch.nn as nn
from typing import Dict, Iterable, Callable
from torch import Tensor
import glob
from tqdm import tqdm
import numpy as np
from PIL import ImageFile

ImageFile.LOAD_TRUNCATED_IMAGES = True
Image.MAX_IMAGE_PIXELS = None


class RobustModel(nn.Module):
    """Wrapper that swallows the extra arguments AttackerModel passes to forward()."""
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x, *args, **kwargs):
        return self.model(x)


class CustomArt(torch.utils.data.Dataset):
    """Single-image dataset carrying the ImageNet mean/std that AttackerModel
    reads off the dataset object it is given."""
    def __init__(self, image, transforms=None):
        self.transforms = transforms
        self.image = image
        self.mean = torch.tensor([0.4850, 0.4560, 0.4060])
        self.std = torch.tensor([0.2290, 0.2240, 0.2250])

    def __getitem__(self, idx):
        img = self.transforms(self.image) if self.transforms else self.image
        return torch.as_tensor(img, dtype=torch.float)

    def __len__(self):
        # The dataset wraps exactly one image (a PIL Image has no len()).
        return 1
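A note on the design: robustness's AttackerModel normalizes inputs using the mean and std attributes of the dataset object it is constructed with, which is the main reason CustomArt exists; the app never actually iterates it as a Dataset. A quick sketch of that contract, assuming the attribute names above:

# Sketch: any object exposing .mean/.std tensors satisfies AttackerModel's
# normalization contract; CustomArt supplies the standard ImageNet statistics.
ds = CustomArt(Image.open('samples/test.png'), transforms=None)
print(ds.mean)  # tensor([0.4850, 0.4560, 0.4060])
print(ds.std)   # tensor([0.2290, 0.2240, 0.2250])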