# Adversarial training of a ViT-Base/16 classifier on CIFAR-10 using ART
# (PGD-based AdversarialTrainer).
from PIL import Image

import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.optim.lr_scheduler import MultiStepLR, StepLR
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import (CenterCrop,
                                    Compose,
                                    Normalize,
                                    RandomHorizontalFlip,
                                    RandomResizedCrop,
                                    Resize,
                                    ToTensor)

from art.attacks.evasion import ProjectedGradientDescent
from art.data_generators import PyTorchDataGenerator
from art.defences.trainer import AdversarialTrainer
from art.estimators.classification import PyTorchClassifier
from datasets import load_dataset
from tensorflow.keras.utils import to_categorical
from transformers import ViTImageProcessor
# Pull the preprocessing configuration (normalisation statistics, input size)
# published with the ViT checkpoint, so our transforms match what the model
# was trained with.
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
IMAGENET_DEFAULT_MEAN = processor.image_mean
IMAGENET_DEFAULT_STD = processor.image_std

# Target input resolution; assumes a square input (height == width) — TODO confirm.
size = processor.size["height"]
"""
|
|
For this example we choose the ResNet18 model as used in the paper (https://proceedings.mlr.press/v97/zhang19p.html)
|
|
The code for the model architecture has been adopted from
|
|
https://github.com/yaodongyu/TRADES/blob/master/models/resnet.py
|
|
"""
|
|
|
|
|
|
# Build a ViT-Base/16 backbone, attach a 10-class head for CIFAR-10, and
# restore CIFAR-10 fine-tuned weights from the HuggingFace Hub.
model = timm.create_model("timm/vit_base_patch16_224.orig_in21k_ft_in1k", pretrained=False)
model.head = nn.Linear(model.head.in_features, 10)

# BUG FIX: the original used map_location="cuda", which raises on CPU-only
# machines. Map the checkpoint onto whatever device is actually available.
_map_device = "cuda" if torch.cuda.is_available() else "cpu"
model.load_state_dict(
    torch.hub.load_state_dict_from_url(
        "https://huggingface.co/edadaltocg/vit_base_patch16_224_in21k_ft_cifar10/resolve/main/pytorch_model.bin",
        map_location=_map_device,
        file_name="vit_base_patch16_224_in21k_ft_cifar10.pth",
    )
)
# Step 1: Load the CIFAR10 dataset
# Use a subset (5k train / 2k test) to keep the example fast, and carve a
# 10% validation split out of the training portion.
train_ds, test_ds = load_dataset('cifar10', split=['train[:5000]', 'test[:2000]'])
splits = train_ds.train_test_split(test_size=0.1)
train_ds = splits['train']
val_ds = splits['test']

# Dataset sizes; train_size is later passed to ART's data generator.
train_size = len(train_ds)
test_size = len(test_ds)
# Normalisation with the processor's ImageNet-style statistics.
normalize = Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)

# Both pipelines end with the same PIL -> tensor -> normalise tail.
_tensorize_tail = [ToTensor(), normalize]

# Training: random crop + horizontal flip augmentation.
_train_transforms = Compose([RandomResizedCrop(size), RandomHorizontalFlip(), *_tensorize_tail])

# Evaluation: deterministic resize + centre crop.
_val_transforms = Compose([Resize(size), CenterCrop(size), *_tensorize_tail])
def train_transforms(examples):
    """Apply the training augmentation pipeline to a batch of HF dataset examples."""
    def _prep(image):
        return _train_transforms(image.convert("RGB"))

    examples['pixel_values'] = list(map(_prep, examples['img']))
    return examples
def val_transforms(examples):
    """Apply the deterministic evaluation pipeline to a batch of HF dataset examples."""
    def _prep(image):
        return _val_transforms(image.convert("RGB"))

    examples['pixel_values'] = list(map(_prep, examples['img']))
    return examples
# Attach on-the-fly transforms: augmentation for training, deterministic
# preprocessing for validation and test.
train_ds.set_transform(train_transforms)
val_ds.set_transform(val_transforms)
test_ds.set_transform(val_transforms)
def collate_fn(examples):
    """Collate a list of example dicts into a (pixel_values, labels) tensor pair."""
    images = [ex["pixel_values"] for ex in examples]
    targets = [ex["label"] for ex in examples]
    return torch.stack(images), torch.tensor(targets)
# Mini-batch sizes for the PyTorch dataloaders.
train_batch_size = 32
eval_batch_size = 32
def dataset2np(dataset, num_classes=10):
    """Materialise a transformed dataset as numpy arrays for ART's APIs.

    Args:
        dataset: indexable dataset whose items expose ``pixel_values``
            (a torch tensor) and ``label`` (an int class index).
        num_classes: number of classes for one-hot encoding (default 10).

    Returns:
        Tuple ``(X, Y)`` of float32 arrays — stacked images and one-hot labels.
    """
    # One-hot lookup table; row y is the one-hot vector for class y.
    # (Replaces the TensorFlow to_categorical dependency with pure numpy.)
    eye = np.eye(num_classes, dtype="float32")
    X = []
    Y = []
    # BUG FIX: the original iterated range(2000), which breaks for datasets of
    # any other length; iterate the actual dataset size instead.
    for i in range(len(dataset)):
        item = dataset[i]
        X.append(item["pixel_values"].detach().numpy())
        Y.append(eye[item["label"]])
    return np.array(X, dtype="float32"), np.array(Y, dtype="float32")
# Wrap the splits in dataloaders; dataset2np additionally materialises the
# (transformed) test split as numpy arrays for ART's predict/attack APIs.
train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=collate_fn, batch_size=train_batch_size)
val_dataloader = DataLoader(val_ds, collate_fn=collate_fn, batch_size=eval_batch_size)
test_dataloader = DataLoader(test_ds, collate_fn=collate_fn, batch_size=eval_batch_size)
x_test, y_test = dataset2np(test_ds)
# SGD with momentum and step decay (lr /= 10 every 3 epochs).
# NOTE(review): lr=0.1 is high for fine-tuning an already-pretrained ViT —
# confirm this matches the intended training recipe.
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=2e-4)
lr_scheduler = StepLR(opt, step_size=3, gamma=0.1)

criterion = nn.CrossEntropyLoss()
# Step 3: Create the ART classifier

# NOTE(review): inputs reaching the classifier are already normalised with
# ImageNet mean/std, so they are not bounded by [0, 1]; clip_values=(0.0, 1.0)
# may therefore clip data and adversarial perturbations incorrectly — confirm.
classifier = PyTorchClassifier(
    model=model,
    clip_values=(0.0, 1.0),
    loss=criterion,
    optimizer=opt,
    input_shape=(3, size, size),
    nb_classes=10,
)
# L-inf PGD attack used to craft adversarial examples during training
# (eps = 8/255 budget, 2/255 step, 10 iterations, 1 random restart).
# NOTE(review): 8/255 is a pixel-scale budget, but the classifier operates on
# normalised inputs, so the effective per-channel perturbation differs — confirm.
attack = ProjectedGradientDescent(
    classifier,
    norm=np.inf,
    eps=8.0 / 255.0,
    eps_step=2.0 / 255.0,
    max_iter=10,
    targeted=False,
    num_random_init=1,
    batch_size=128,
    verbose=False,
)
# Baseline: clean-test accuracy of the pretrained model before any
# adversarial training.
x_test_clean_pred = classifier.predict(x_test).argmax(axis=1)
_clean_acc = 100.0 * float(np.mean(x_test_clean_pred == y_test.argmax(axis=1)))
print(
    "Accuracy on clean samples before adversarial training: %.2f%%"
    % _clean_acc
)
# Step 4: Create the trainer object - AdversarialTrainer, which trains on a
# mixture of clean and PGD-adversarial examples.
trainer = AdversarialTrainer(
    classifier, attack
)

# Wrap the PyTorch train dataloader in ART's data-generator interface.
art_datagen = PyTorchDataGenerator(iterator=train_dataloader, size=train_size, batch_size=128)

# Step 5: fit the trainer
trainer.fit_generator(art_datagen, nb_epochs=50)
# Clean-test accuracy after adversarial training (robust training typically
# trades some benign accuracy for robustness).
x_test_pred = classifier.predict(x_test).argmax(axis=1)
_benign_acc = 100.0 * float(np.mean(x_test_pred == y_test.argmax(axis=1)))
print(
    "Accuracy on benign test samples after adversarial training: %.2f%%"
    % _benign_acc
)
# Evaluate robustness with a stronger PGD than used during training
# (20 iterations instead of 10; same eps budget).
attack_test = ProjectedGradientDescent(
    classifier,
    norm=np.inf,
    eps=8.0 / 255.0,
    eps_step=2.0 / 255.0,
    max_iter=20,
    targeted=False,
    num_random_init=1,
    batch_size=128,
    verbose=False,
)
# Craft adversarial test samples (y is supplied so the attack targets the
# true labels rather than the model's own predictions).
x_test_attack = attack_test.generate(x_test, y=y_test)
x_test_attack_pred = np.argmax(classifier.predict(x_test_attack), axis=1)
print(
    "Accuracy on original PGD adversarial samples after adversarial training: %.2f%%"
    % (np.sum(x_test_attack_pred == np.argmax(y_test, axis=1)) / x_test.shape[0] * 100)
)
# Persist the adversarially trained weights.
torch.save(trainer.classifier.model.state_dict(), 'cifar10_pgd.pth')
print(
    "Save the AT model! "
)