TGR
|
|
@ -0,0 +1,50 @@
|
|||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
|
||||
class Normalize(nn.Module):
    """Channel-wise normalization layer: out[:, c] = (in[:, c] - mean[c]) / std[c].

    `mean` and `std` are per-channel sequences indexed by the channel
    dimension (dim 1) of the input tensor.
    """

    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.mean = mean
        self.std = std

    def forward(self, input):
        # Work on a copy so the caller's tensor is never mutated in place.
        out = input.clone()
        num_channels = input.size()[1]
        for channel in range(num_channels):
            out[:, channel] = (out[:, channel] - self.mean[channel]) / self.std[channel]
        return out
|
||||
|
||||
class TfNormalize(nn.Module):
    """Input normalization supporting two preprocessing conventions.

    mode:
        'tensorflow': convert data from [0,1] to [-1,1]
        'torch':      (input - mean) / std, applied per channel
    """

    def __init__(self, mean=0, std=1, mode='tensorflow'):
        """
        mode:
        'tensorflow':convert data from [0,1] to [-1,1]
        'torch':(input - mean) / std
        """
        super(TfNormalize, self).__init__()
        self.mean = mean
        self.std = std
        self.mode = mode

    def forward(self, input):
        # Never mutate the caller's tensor; normalize a copy instead.
        out = input.clone()
        if self.mode == 'tensorflow':
            # TF-slim models expect inputs rescaled from [0, 1] to [-1, 1].
            out = out * 2.0 - 1.0
        elif self.mode == 'torch':
            # Per-channel standardization over dim 1; mean/std must be
            # indexable sequences in this mode.
            for channel in range(input.size()[1]):
                out[:, channel] = (out[:, channel] - self.mean[channel]) / self.std[channel]
        return out
|
||||
|
||||
|
||||
class Permute(nn.Module):
    """Reorder the channel dimension of the input, e.g. RGB <-> BGR.

    Parameters
    ----------
    permutation : sequence of int, default (2, 1, 0)
        New channel order; the default reverses a three-channel input.
    """

    def __init__(self, permutation=(2, 1, 0)):
        super().__init__()
        # Copy into a fresh list: fixes the shared-mutable-default pitfall
        # of the original `permutation=[2, 1, 0]` (all instances aliased one
        # list) while keeping list-style advanced indexing in forward().
        self.permutation = list(permutation)

    def forward(self, input):
        # Advanced indexing on dim 1 selects channels in the requested order.
        return input[:, self.permutation]
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
# Token Gradient Regularization
|
||||
Official Pytorch implementation for "Transferable Adversarial Attacks on Vision Transformers with Token Gradient Regularization" (CVPR 2023).
|
||||
|
||||
**[Transferable Adversarial Attacks on Vision Transformers with Token Gradient Regularization](https://arxiv.org/pdf/2303.15754.pdf) (CVPR 2023)**
|
||||
|
||||
## Requirements
|
||||
|
||||
- Python 3.6.13
|
||||
- Pytorch 1.7.1
|
||||
- Torchvision 0.8.2
|
||||
- Numpy 1.19.2
|
||||
- Pillow 8.3.1
|
||||
- Timm 0.4.12
|
||||
- Scipy 1.5.4
|
||||
|
||||
## Experiments
|
||||
|
||||
|
||||
ViT models are all available in [timm](https://github.com/huggingface/pytorch-image-models) library. We consider four surrogate models (vit_base_patch16_224, pit_b_224, cait_s24_224, and visformer_small) and four additional target models (deit_base_distilled_patch16_224, levit_256, convit_base, tnt_s_patch16_224).
|
||||
|
||||
To evaluate CNN models, please download the converted pretrained models from (https://github.com/ylhz/tf_to_pytorch_model) before running the code. Then place these model checkpoint files in `./models`.
|
||||
|
||||
#### Introduction
|
||||
|
||||
|
||||
- `methods.py` : the implementation for TGR attack.
|
||||
|
||||
- `evaluate.py` : the code for evaluating generated adversarial examples on different ViT models.
|
||||
|
||||
- `evaluate_cnn.py` : the code for evaluating generated adversarial examples on different CNN models.
|
||||
|
||||
|
||||
#### Example Usage
|
||||
|
||||
##### Generate adversarial examples:
|
||||
|
||||
- TGR
|
||||
|
||||
```
|
||||
python attack.py --attack TGR --batch_size 1 --model_name vit_base_patch16_224
|
||||
```
|
||||
|
||||
You can also modify the hyperparameter values to align with the detailed settings in our paper.
|
||||
|
||||
|
||||
##### Evaluate the attack success rate
|
||||
|
||||
- Evaluate on ViT models
|
||||
|
||||
```
|
||||
bash run_evaluate.sh model_vit_base_patch16_224-method_TGR
|
||||
```
|
||||
|
||||
- Evaluate on CNN models
|
||||
|
||||
```
|
||||
python evaluate_cnn.py
|
||||
```
|
||||
|
||||
|
||||
## Citing this work
|
||||
|
||||
If you find this work useful in your research, please consider citing:
|
||||
|
||||
```
|
||||
@inproceedings{zhang2023transferable,
|
||||
title={Transferable Adversarial Attacks on Vision Transformers with Token Gradient Regularization},
|
||||
author={Zhang, Jianping and Huang, Yizhan and Wu, Weibin and Lyu, Michael R},
|
||||
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
|
||||
pages={16415--16424},
|
||||
year={2023}
|
||||
}
|
||||
```
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
Code refers to: [Towards Transferable Adversarial Attacks on Vision Transformers](https://github.com/zhipeng-wei/PNA-PatchOut) and [tf_to_torch_model](https://github.com/ylhz/tf_to_pytorch_model)
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
import torch
|
||||
import argparse
|
||||
from torch.utils.data import DataLoader
|
||||
import os
|
||||
import pandas as pd
|
||||
import time
|
||||
import pickle as pkl
|
||||
|
||||
from dataset import AdvDataset
|
||||
from utils import BASE_ADV_PATH, ROOT_PATH
|
||||
import methods
|
||||
|
||||
def arg_parse():
    """Parse command-line arguments and prepare the output directory.

    Returns the parsed ``argparse.Namespace`` with an extra ``opt_path``
    attribute pointing at the (created) directory where adversarial
    examples for this model/attack combination are written.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--attack', type=str, default='', help='the name of specific attack method')
    parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
    # Help text previously claimed "(default: 16)" while the actual default
    # is 20; corrected to match the real default.
    parser.add_argument('--batch_size', type=int, default=20, metavar='N',
                        help='input batch size for inference (default: 20)')
    parser.add_argument('--model_name', type=str, default='', help='')
    parser.add_argument('--filename_prefix', type=str, default='', help='')
    args = parser.parse_args()
    # Output directory encodes both the surrogate model and the attack name.
    args.opt_path = os.path.join(BASE_ADV_PATH, 'model_{}-method_{}'.format(args.model_name, args.attack))
    # exist_ok=True removes the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(args.opt_path, exist_ok=True)
    return args
|
||||
|
||||
if __name__ == '__main__':
    args = arg_parse()
    # Restrict visible GPUs before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # loading dataset
    dataset = AdvDataset(args.model_name, os.path.join(ROOT_PATH, 'clean_resized_images'))
    data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=0)
    print (args.attack, args.model_name)

    # Attack
    # Look the attack class up by name (e.g. 'TGR') in the methods module
    # and instantiate it for the chosen surrogate model.
    attack_method = getattr(methods, args.attack)(args.model_name)

    # Main
    all_loss_info = {}
    for batch_idx, batch_data in enumerate(data_loader):
        if batch_idx%100 == 0:
            print ('Runing batch_idx', batch_idx)
        batch_x = batch_data[0]  # input images
        batch_y = batch_data[1]  # labels
        # NOTE(review): index 2 of batch_data is skipped; presumably
        # batch_data[3] holds the image filenames — confirm against AdvDataset.
        batch_name = batch_data[3]

        adv_inps, loss_info = attack_method(batch_x, batch_y)
        attack_method._save_images(adv_inps, batch_name, args.opt_path)
        if loss_info is not None:
            all_loss_info[batch_name] = loss_info
        if loss_info is not None:
            # Re-dump the accumulated dict each time so an interrupted run
            # keeps earlier batches. NOTE(review): the payload is pickle,
            # not JSON, despite the .json extension — confirm that readers
            # of loss_info.json expect pickle.
            with open(os.path.join(args.opt_path, 'loss_info.json'), 'wb') as opt:
                pkl.dump(all_loss_info, opt)
|
||||
|
After Width: | Height: | Size: 93 KiB |
|
After Width: | Height: | Size: 86 KiB |
|
After Width: | Height: | Size: 84 KiB |
|
After Width: | Height: | Size: 77 KiB |
|
After Width: | Height: | Size: 126 KiB |
|
After Width: | Height: | Size: 94 KiB |
|
After Width: | Height: | Size: 69 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 101 KiB |
|
After Width: | Height: | Size: 78 KiB |
|
After Width: | Height: | Size: 82 KiB |
|
After Width: | Height: | Size: 108 KiB |
|
After Width: | Height: | Size: 111 KiB |
|
After Width: | Height: | Size: 50 KiB |
|
After Width: | Height: | Size: 91 KiB |
|
After Width: | Height: | Size: 110 KiB |
|
After Width: | Height: | Size: 96 KiB |
|
After Width: | Height: | Size: 100 KiB |
|
After Width: | Height: | Size: 98 KiB |
|
After Width: | Height: | Size: 90 KiB |
|
After Width: | Height: | Size: 115 KiB |
|
After Width: | Height: | Size: 111 KiB |
|
After Width: | Height: | Size: 86 KiB |
|
After Width: | Height: | Size: 79 KiB |
|
After Width: | Height: | Size: 116 KiB |
|
After Width: | Height: | Size: 115 KiB |
|
After Width: | Height: | Size: 105 KiB |
|
After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 104 KiB |
|
After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 97 KiB |
|
After Width: | Height: | Size: 89 KiB |
|
After Width: | Height: | Size: 84 KiB |
|
After Width: | Height: | Size: 60 KiB |
|
After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 116 KiB |
|
After Width: | Height: | Size: 128 KiB |
|
After Width: | Height: | Size: 73 KiB |
|
After Width: | Height: | Size: 84 KiB |
|
After Width: | Height: | Size: 62 KiB |
|
After Width: | Height: | Size: 78 KiB |
|
After Width: | Height: | Size: 109 KiB |
|
After Width: | Height: | Size: 102 KiB |
|
After Width: | Height: | Size: 118 KiB |
|
After Width: | Height: | Size: 73 KiB |
|
After Width: | Height: | Size: 75 KiB |
|
After Width: | Height: | Size: 93 KiB |
|
After Width: | Height: | Size: 122 KiB |
|
After Width: | Height: | Size: 122 KiB |
|
After Width: | Height: | Size: 90 KiB |
|
After Width: | Height: | Size: 116 KiB |
|
After Width: | Height: | Size: 43 KiB |
|
After Width: | Height: | Size: 132 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 82 KiB |
|
After Width: | Height: | Size: 90 KiB |
|
After Width: | Height: | Size: 108 KiB |
|
After Width: | Height: | Size: 87 KiB |
|
After Width: | Height: | Size: 92 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 79 KiB |
|
After Width: | Height: | Size: 106 KiB |
|
After Width: | Height: | Size: 101 KiB |
|
After Width: | Height: | Size: 94 KiB |
|
After Width: | Height: | Size: 80 KiB |
|
After Width: | Height: | Size: 75 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 81 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 67 KiB |
|
After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 96 KiB |
|
After Width: | Height: | Size: 64 KiB |
|
After Width: | Height: | Size: 94 KiB |
|
After Width: | Height: | Size: 129 KiB |
|
After Width: | Height: | Size: 85 KiB |
|
After Width: | Height: | Size: 108 KiB |
|
After Width: | Height: | Size: 76 KiB |
|
After Width: | Height: | Size: 95 KiB |
|
After Width: | Height: | Size: 57 KiB |
|
After Width: | Height: | Size: 92 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 112 KiB |
|
After Width: | Height: | Size: 112 KiB |
|
After Width: | Height: | Size: 74 KiB |
|
After Width: | Height: | Size: 105 KiB |
|
After Width: | Height: | Size: 102 KiB |
|
After Width: | Height: | Size: 114 KiB |
|
After Width: | Height: | Size: 84 KiB |
|
After Width: | Height: | Size: 116 KiB |
|
After Width: | Height: | Size: 98 KiB |
|
After Width: | Height: | Size: 102 KiB |