I've implemented the algorithm from the paper "Maximal Jacobian-based Saliency Map Attack" on Google Colab.
However, even a single iteration of the attack takes hours, so I need to adapt it to run efficiently on the GPU.
I don't know anything about adapting code for GPUs.
Here's my code:
import numpy as np
import torch

def clip_eps(x, eps):
    # Effectively clamps the perturbed pixel value to the valid range [0, 1]
    return min(1, x + eps, max(0, x - eps, x))

def jsma_attack(image, y, f, Imax, th, eps):
    n = 784
    # clone() so the perturbation does not overwrite the original image in place
    x_ = image.clone().view(1, 1, 28, 28)
    i = 0
    eta = np.zeros((1, n))  # sign of the last perturbation applied to each pixel
    Gamma = dict.fromkeys(range(n), 1)  # search domain: pixels still eligible for perturbation

    while i < Imax and len(Gamma) > 1:
        # Stop as soon as the model no longer predicts the true class
        y_pred = f(x_.cuda()).detach().max(1, keepdim=True)[1].item()
        if y_pred != y.item():
            break
        gamma = 0
        # Full Jacobian of the 10 class scores w.r.t. the 784 input pixels
        J = torch.autograd.functional.jacobian(f, x_.cuda()).cpu()
        # Exhaustive search over all pixel pairs (k, l) and all classes t
        for k in Gamma:
            wk, hk = k // 28, k % 28
            for l in Gamma:
                if k == l:
                    continue
                wl, hl = l // 28, l % 28
                for t in range(10):
                    # print("k:{} l:{} t:{}".format(k, l, t))  # debug print; emitting millions of lines per iteration is itself a major slowdown
                    # alpha: gradient of class t w.r.t. the pair; beta: the same sum over every other class
                    alpha = J[0, t, 0, 0, wk, hk] + J[0, t, 0, 0, wl, hl]
                    beta = 0
                    for m in range(10):
                        if m != t:
                            beta += J[0, m, 0, 0, wk, hk]
                            beta += J[0, m, 0, 0, wl, hl]
                    if -alpha * beta > gamma:
                        p_star, q_star, gamma = k, l, -alpha * beta
                        # Push away from the true class, toward any other class
                        if t == y.item():
                            theta_prime = -torch.sign(alpha).item() * th
                        else:
                            theta_prime = torch.sign(alpha).item() * th
        if gamma == 0:
            break
        # Perturb the selected pair of pixels
        x_[0, 0, p_star // 28, p_star % 28] = clip_eps(x_[0, 0, p_star // 28, p_star % 28] + theta_prime, eps)
        x_[0, 0, q_star // 28, q_star % 28] = clip_eps(x_[0, 0, q_star // 28, q_star % 28] + theta_prime, eps)
        # Drop a pixel from the search domain if it saturated or if its perturbation direction flipped
        if not (0 < x_[0, 0, p_star // 28, p_star % 28] < 1) or eta[0, p_star] == -theta_prime:
            Gamma.pop(p_star, 'Key not found')
        if not (0 < x_[0, 0, q_star // 28, q_star % 28] < 1) or eta[0, q_star] == -theta_prime:
            Gamma.pop(q_star, 'Key not found')
        eta[0, p_star] = theta_prime
        eta[0, q_star] = theta_prime
        i += 1
    return x_.view(28, 28)
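
For concreteness, here is the kind of vectorization I imagine, though I haven't verified it: since alpha for a pair (k, l) and class t is just J[t, k] + J[t, l], and beta is the same sum taken over every other class, both can be built for all classes and all pairs at once with broadcasting, and the whole triple loop collapses into a single argmax on the GPU. All names here (pairwise_saliency, active, col_sum) are placeholders of mine, and J is assumed to be the Jacobian reshaped to (10, 784) and kept on the GPU:

import torch

def pairwise_saliency(J, active):
    # J:      (C, n) tensor on the GPU, J[t, k] = d f_t / d x_k
    # active: (n,) boolean tensor marking the pixels still in Gamma
    C, n = J.shape
    col_sum = J.sum(dim=0)                               # (n,): sum over all classes, per pixel
    alpha = J.unsqueeze(2) + J.unsqueeze(1)              # (C, n, n): alpha[t, k, l] = J[t, k] + J[t, l]
    total = col_sum.unsqueeze(1) + col_sum.unsqueeze(0)  # (n, n): sum over *all* classes for the pair
    beta = total.unsqueeze(0) - alpha                    # (C, n, n): only the classes m != t remain
    score = -alpha * beta                                # (C, n, n): saliency of every (t, k, l)
    # Keep only pairs of distinct pixels that are both still in the search domain
    valid = active.unsqueeze(1) & active.unsqueeze(0)
    valid &= ~torch.eye(n, dtype=torch.bool, device=J.device)
    score = score.masked_fill(~valid, float('-inf'))     # the mask broadcasts over the class dimension
    # One argmax over the flattened (C, n, n) volume replaces the triple loop
    flat = score.argmax()
    t_star = (flat // (n * n)).item()
    p_star = ((flat % (n * n)) // n).item()
    q_star = (flat % n).item()
    return t_star, p_star, q_star, alpha[t_star, p_star, q_star].item()

If this is right, jsma_attack would call it once per iteration with J = torch.autograd.functional.jacobian(f, x_.cuda()).view(10, 784) (keeping it on the GPU instead of the .cpu() round trip) and an active mask built from the keys of Gamma, and the sign of the returned alpha would fix theta_prime exactly as in the inner loop. Is this the right approach?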