localmain3.py
import numpy as np
from foolbox2.criteria import TargetClass
from foolbox2.models.wrappers2 import CompositeModel
# from fmodel3 import create_fmodel_combo
from fmodel import create_fmodel as create_fmodel_18
from fmodel2 import create_fmodel as create_fmodel_ALP
from fmodel5 import create_fmodel as create_fmodel_ALP1000
from foolbox2.attacks.iterative_projected_gradient import MomentumIterativeAttack
from foolbox2.distances import MeanSquaredDistance
from foolbox2.adversarial import Adversarial
# from adversarial_vision_challenge import load_model
# from adversarial_vision_challenge import read_images
# from adversarial_vision_challenge import store_adversarial
# from adversarial_vision_challenge import attack_complete
from smiterative2 import SAIterativeAttack, RMSIterativeAttack, \
    AdamIterativeAttack, AdagradIterativeAttack
import os
import csv
# scipy.misc.imsave was deprecated and removed in SciPy 1.2; on newer
# installs, imageio.imwrite is the documented drop-in replacement.
from scipy.misc import imsave
import PIL.Image
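

# Summary of the code below: the script reads the images and target classes
# listed in flower/target_class.csv, wraps the black-box model returned by
# load_model() and the ALP substitute model in a CompositeModel (forward
# predictions from the black box, gradients from the substitute), runs the
# Adam-style iterative attack from smiterative2 against each image, and
# writes the resulting adversarials as PNGs into the output/ directory.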
def read_images():
    # stand-in for adversarial_vision_challenge.read_images (see commented import above)
    data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "flower")
    with open(os.path.join(data_dir, "target_class.csv")) as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            # yields (output name, RGB image as a numpy array, target class)
            yield (row[0],
                   np.array(PIL.Image.open(os.path.join(data_dir, row[1])).convert("RGB")),
                   int(row[2]))
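
# Each row of target_class.csv is assumed (from the indexing above) to be
#     <output_name>,<image_path_relative_to_flower/>,<target_class_index>
# e.g. a purely hypothetical row:  0000,0000.png,22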
def attack_complete():
    # no-op stand-in for adversarial_vision_challenge.attack_complete
    pass

def store_adversarial(file_name, adversarial):
    # assumes the output/ directory already exists next to this script
    out_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
    imsave(os.path.join(out_dir, file_name + ".png"), adversarial, format="png")

def load_model():
    # the forward (black-box) model comes from fmodel.create_fmodel
    return create_fmodel_18()
# from contextlib import contextmanager
# import numpy as np
# import tensorflow as tf
# import tensorflow.contrib.slim as slim
# from tensorflow.contrib.slim.nets import resnet_v1
# import tensorpack as tp
# import tensorpack.utils.viz as viz
# from skimage.transform import resize
# IMAGE_SIZE = 224
# @contextmanager
# def guided_relu():
#     """
#     Returns:
#         A context where the gradient of :meth:`tf.nn.relu` is replaced by
#         guided back-propagation, as described in the paper:
#         `Striving for Simplicity: The All Convolutional Net
#         <https://arxiv.org/abs/1412.6806>`_
#     """
#     from tensorflow.python.ops import gen_nn_ops  # noqa
#     @tf.RegisterGradient("GuidedReLU")
#     def GuidedReluGrad(op, grad):
#         return tf.where(0. < grad,
#                         gen_nn_ops.relu_grad(grad, op.outputs[0]),
#                         tf.zeros(grad.get_shape()))
#     g = tf.get_default_graph()
#     with g.gradient_override_map({'Relu': 'GuidedReLU'}):
#         yield
# def saliency_map(output, input, name="saliency_map"):
#     """
#     Produce a saliency map as described in the paper:
#     `Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
#     <https://arxiv.org/abs/1312.6034>`_.
#     The saliency map is the gradient of the max element in output w.r.t. input.
#     Returns:
#         tf.Tensor: the saliency map. Has the same shape as input.
#     """
#     max_outp = tf.reduce_max(output, 1)
#     saliency_op = tf.gradients(max_outp, input)[:][0]
#     return tf.identity(saliency_op, name=name)
# class SaliencyModel(tp.ModelDescBase):
#     def inputs(self):
#         return [tf.placeholder(tf.float32, (IMAGE_SIZE, IMAGE_SIZE, 3), 'image')]
#     def build_graph(self, orig_image):
#         mean = tf.get_variable('resnet_v1_50/mean_rgb', shape=[3])
#         with guided_relu():
#             with slim.arg_scope(resnet_v1.resnet_arg_scope()):
#                 image = tf.expand_dims(orig_image - mean, 0)
#                 logits, _ = resnet_v1.resnet_v1_50(image, 1000)
#                 saliency_map(logits, orig_image, name="saliency")
# def find_salience(predictor, im):
#     # resnet expects RGB inputs of 224x224x3
#     im = resize(im, (IMAGE_SIZE, IMAGE_SIZE))
#     im = im.astype(np.float32)[:, :, ::-1]
#     # print(type(im))
#     saliency_images = predictor(im)[0]
#     # print(saliency_images)
#     # print(type(saliency_images))
#     pos_saliency = np.maximum(0, saliency_images)
#     resized_pos_saliency = resize(pos_saliency, (64, 64))
#     # print(resized_pos_saliency.shape)
#     return resized_pos_saliency
def run_attack(model, image, target_class, pos_salience):
    criterion = TargetClass(target_class)
    # model is a CompositeModel:
    #   backward model = substitute model (ResNet/VGG/AlexNet) used to calculate gradients
    #   forward model  = the black-box model being attacked
    distance = MeanSquaredDistance
    attack = AdamIterativeAttack()
    # attack = foolbox.attacks.annealer(model, criterion)
    # prediction of the black-box model on the original image
    original_label = np.argmax(model.predictions(image))
    adv = Adversarial(model, criterion, image, original_label, distance=distance)
    return attack(adv, pos_salience=pos_salience)
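
# Sketch of swapping the optimizer used inside run_attack above. This assumes
# the other smiterative2 attacks share AdamIterativeAttack's interface
# (including the pos_salience keyword), which is not verified here:
#     attack = RMSIterativeAttack()      # RMSProp-style update
#     attack = AdagradIterativeAttack()  # Adagrad-style update
#     attack = SAIterativeAttack()       # also imported above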
def main():
    # tf.logging.set_verbosity(tf.logging.INFO)
    # instantiate black-box and substitute models
    forward_model = load_model()
    # backward_model1 = create_fmodel_18()
    backward_model2 = create_fmodel_ALP()
    # backward_model3 = create_fmodel_ALP1000()
    # print(backward_model1[0])

    # instantiate differentiable composite model
    # (predictions from the black box, gradients from the substitute)
    model = CompositeModel(
        forward_model=forward_model,
        backward_models=[backward_model2],
        weights=[1])

    # predictor = tp.OfflinePredictor(tp.PredictConfig(
    #     model=SaliencyModel(),
    #     session_init=tp.get_model_loader("resnet_v1_50.ckpt"),
    #     input_names=['image'],
    #     output_names=['saliency']))

    # `label` here is the target class read from target_class.csv
    for (file_name, image, label) in read_images():
        # pos_salience = find_salience(predictor, image)
        adversarial = run_attack(model, image, label, None)
        if adversarial is None:
            # the attack may fail to find an adversarial; skip saving rather
            # than passing None to imsave
            continue
        store_adversarial(file_name, adversarial)
    attack_complete()


if __name__ == '__main__':
    main()
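
# Expected layout next to this script (inferred from the paths used above):
#     flower/target_class.csv and the input images   -> attack inputs
#     output/                                        -> adversarial PNGs are written here
# Run with:  python localmain3.py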