更新 cifar10.py
This commit is contained in:
parent
e61c116f1d
commit
fca1280172
|
|
@@ -0,0 +1,192 @@
|
||||||
|
from PIL import Image
|
||||||
|
import numpy as np
|
||||||
|
import timm
|
||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
import torch.nn.functional as F
|
||||||
|
import torchvision.transforms as transforms
|
||||||
|
from torch.utils.data import Dataset, DataLoader
|
||||||
|
from torch.optim.lr_scheduler import MultiStepLR, StepLR
|
||||||
|
|
||||||
|
from art.estimators.classification import PyTorchClassifier
|
||||||
|
from art.data_generators import PyTorchDataGenerator
|
||||||
|
from art.defences.trainer import AdversarialTrainer
|
||||||
|
from art.attacks.evasion import ProjectedGradientDescent
|
||||||
|
from datasets import load_dataset
|
||||||
|
from torchvision.transforms import (CenterCrop,
|
||||||
|
Compose,
|
||||||
|
Normalize,
|
||||||
|
RandomHorizontalFlip,
|
||||||
|
RandomResizedCrop,
|
||||||
|
Resize,
|
||||||
|
ToTensor)
|
||||||
|
from tensorflow.keras.utils import to_categorical
|
||||||
|
from transformers import ViTImageProcessor
|
||||||
|
|
||||||
|
# Pull the preprocessing configuration (normalisation statistics and input
# resolution) from the ViT image processor rather than hard-coding them.
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
IMAGENET_DEFAULT_MEAN = processor.image_mean
IMAGENET_DEFAULT_STD = processor.image_std

# Side length of the square model input, e.g. 224 for ViT-Base/16.
size = processor.size["height"]
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
For this example we choose the ResNet18 model as used in the paper (https://proceedings.mlr.press/v97/zhang19p.html)
|
||||||
|
The code for the model architecture has been adopted from
|
||||||
|
https://github.com/yaodongyu/TRADES/blob/master/models/resnet.py
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
model = timm.create_model("timm/vit_base_patch16_224.orig_in21k_ft_in1k", pretrained=False)
|
||||||
|
model.head = nn.Linear(model.head.in_features, 10)
|
||||||
|
model.load_state_dict(
|
||||||
|
torch.hub.load_state_dict_from_url(
|
||||||
|
"https://huggingface.co/edadaltocg/vit_base_patch16_224_in21k_ft_cifar10/resolve/main/pytorch_model.bin",
|
||||||
|
map_location="cuda",
|
||||||
|
file_name="vit_base_patch16_224_in21k_ft_cifar10.pth",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Step 1: Load a CIFAR-10 subset and carve a 10% validation split
# out of the training portion.
train_ds, test_ds = load_dataset('cifar10', split=['train[:5000]', 'test[:2000]'])
splits = train_ds.train_test_split(test_size=0.1)
val_ds = splits['test']
train_ds = splits['train']

# Split sizes, reused when sizing the ART data generator below.
train_size = len(train_ds)
test_size = len(test_ds)
|
||||||
|
|
||||||
|
# Normalisation uses the mean/std advertised by the ViT image processor.
normalize = Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)

# Training pipeline: random crop + horizontal flip for augmentation,
# then tensor conversion and normalisation.
_train_transforms = Compose([
    RandomResizedCrop(size),
    RandomHorizontalFlip(),
    ToTensor(),
    normalize,
])

# Evaluation pipeline: deterministic resize and centre crop only.
_val_transforms = Compose([
    Resize(size),
    CenterCrop(size),
    ToTensor(),
    normalize,
])
|
||||||
|
|
||||||
|
def train_transforms(examples):
    """Apply the augmenting train pipeline to every image in a batch dict."""
    pixel_values = []
    for image in examples['img']:
        pixel_values.append(_train_transforms(image.convert("RGB")))
    examples['pixel_values'] = pixel_values
    return examples
|
||||||
|
|
||||||
|
def val_transforms(examples):
    """Apply the deterministic eval pipeline to every image in a batch dict."""
    pixel_values = []
    for image in examples['img']:
        pixel_values.append(_val_transforms(image.convert("RGB")))
    examples['pixel_values'] = pixel_values
    return examples
|
||||||
|
|
||||||
|
# Attach the pipelines lazily: `datasets` applies them on item access,
# not up front. Validation and test share the eval pipeline.
train_ds.set_transform(train_transforms)
for _eval_split in (val_ds, test_ds):
    _eval_split.set_transform(val_transforms)
|
||||||
|
|
||||||
|
|
||||||
|
def collate_fn(examples):
    """Collate dataset rows into a batch: stacked images and a label tensor."""
    images, labels = [], []
    for ex in examples:
        images.append(ex["pixel_values"])
        labels.append(ex["label"])
    return torch.stack(images), torch.tensor(labels)
|
||||||
|
|
||||||
|
# Mini-batch sizes for the PyTorch dataloaders below.
train_batch_size = 32
eval_batch_size = 32
|
||||||
|
|
||||||
|
def dataset2np(dataset, num_samples=None):
    """Materialise a dataset into NumPy arrays for ART's numpy-based APIs.

    Args:
        dataset: indexable dataset whose items expose "pixel_values"
            (a torch tensor, CHW) and "label" (an int class index 0-9).
        num_samples: number of leading samples to convert; defaults to the
            whole dataset. (The original hard-coded 2000, which happened to
            be the test-split size but broke for any other dataset length.)

    Returns:
        (X, Y): float32 image array of shape (n, C, H, W) and float32
        one-hot label array of shape (n, 10).
    """
    if num_samples is None:
        num_samples = len(dataset)
    images, labels = [], []
    # One-hot encode via identity-matrix indexing; replaces
    # tensorflow.keras.utils.to_categorical so this conversion has no
    # TensorFlow dependency inside an otherwise pure-PyTorch script.
    eye = np.eye(10, dtype="float32")
    for i in range(num_samples):
        item = dataset[i]
        images.append(item["pixel_values"].detach().numpy())
        labels.append(eye[item["label"]])
    X = np.array(images).astype("float32")
    Y = np.array(labels).astype("float32")
    return X, Y
|
||||||
|
|
||||||
|
# Wrap the splits in dataloaders; only the training loader shuffles.
train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=collate_fn, batch_size=train_batch_size)
val_dataloader = DataLoader(val_ds, collate_fn=collate_fn, batch_size=eval_batch_size)
test_dataloader = DataLoader(test_ds, collate_fn=collate_fn, batch_size=eval_batch_size)

# Materialise the test split as NumPy arrays for ART's predict/attack calls.
x_test, y_test = dataset2np(test_ds)
|
||||||
|
|
||||||
|
|
||||||
|
# SGD with momentum and weight decay; LR steps down 10x every 3 epochs.
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=2e-4)
lr_scheduler = StepLR(opt, step_size=3, gamma=0.1)

criterion = nn.CrossEntropyLoss()

# Step 3: Create the ART classifier
# NOTE(review): clip_values=(0, 1) matches raw pixel inputs, but the
# pipelines above feed ImageNet-normalised tensors whose values leave
# [0, 1] — confirm the intended clipping range for PGD.
classifier = PyTorchClassifier(
    model=model,
    clip_values=(0.0, 1.0),
    loss=criterion,
    optimizer=opt,
    input_shape=(3, size, size),
    nb_classes=10,
)
|
||||||
|
|
||||||
|
# Untargeted L-inf PGD (eps = 8/255, 10 iterations) used to craft the
# training-time adversarial examples.
attack = ProjectedGradientDescent(
    classifier,
    norm=np.inf,
    eps=8.0 / 255.0,
    eps_step=2.0 / 255.0,
    max_iter=10,
    targeted=False,
    num_random_init=1,
    batch_size=128,
    verbose=False,
)

# Baseline: clean-test accuracy before any adversarial training.
x_test_clean_pred = np.argmax(classifier.predict(x_test), axis=1)
clean_acc = np.sum(x_test_clean_pred == np.argmax(y_test, axis=1)) / x_test.shape[0]
print(
    "Accuracy on clean samples before adversarial training: %.2f%%"
    % (clean_acc * 100)
)
|
||||||
|
|
||||||
|
|
||||||
|
# Step 4: Create the trainer object - AdversarialTrainer (plain PGD-AT)
trainer = AdversarialTrainer(classifier, attack)

# Wrap the PyTorch training dataloader so ART can draw batches from it.
# (A previous comment said "Keras"; this is a PyTorch data generator.)
art_datagen = PyTorchDataGenerator(iterator=train_dataloader, size=train_size, batch_size=128)

# Step 5: fit the trainer
trainer.fit_generator(art_datagen, nb_epochs=50)

# Clean-test accuracy after adversarial training.
x_test_pred = np.argmax(classifier.predict(x_test), axis=1)
benign_acc = np.sum(x_test_pred == np.argmax(y_test, axis=1)) / x_test.shape[0]
print(
    "Accuracy on benign test samples after adversarial training: %.2f%%"
    % (benign_acc * 100)
)
|
||||||
|
|
||||||
|
# Robust evaluation: a fresh, stronger PGD (20 iterations) on the test set.
attack_test = ProjectedGradientDescent(
    classifier,
    norm=np.inf,
    eps=8.0 / 255.0,
    eps_step=2.0 / 255.0,
    max_iter=20,
    targeted=False,
    num_random_init=1,
    batch_size=128,
    verbose=False,
)
x_test_attack = attack_test.generate(x_test, y=y_test)
x_test_attack_pred = np.argmax(classifier.predict(x_test_attack), axis=1)
robust_acc = np.sum(x_test_attack_pred == np.argmax(y_test, axis=1)) / x_test.shape[0]
print(
    "Accuracy on original PGD adversarial samples after adversarial training: %.2f%%"
    % (robust_acc * 100)
)

# Persist the adversarially trained weights.
torch.save(trainer.classifier.model.state_dict(), 'cifar10_pgd.pth')
print(
    "Save the AT model! "
)
|
||||||
122
svhn.py
122
svhn.py
|
|
@@ -1,122 +0,0 @@
|
||||||
import warnings

import torchvision.datasets

# Silence library warnings for the whole training script.
warnings.filterwarnings('ignore')
|
|
||||||
|
|
||||||
from PIL import Image
|
|
||||||
import torch
|
|
||||||
import timm
|
|
||||||
import requests
|
|
||||||
import numpy as np
|
|
||||||
import torchvision.transforms as transforms
|
|
||||||
from torch import nn
|
|
||||||
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
|
|
||||||
from torch.utils.data import Dataset, DataLoader
|
|
||||||
import copy
|
|
||||||
|
|
||||||
from art.estimators.classification import PyTorchClassifier
|
|
||||||
from art.data_generators import PyTorchDataGenerator
|
|
||||||
from art.utils import load_cifar10
|
|
||||||
from art.attacks.evasion import ProjectedGradientDescent
|
|
||||||
from art.defences.trainer import AdversarialTrainer
|
|
||||||
|
|
||||||
# ViT-Base/16 with a 10-class head, restored from a locally cached
# CIFAR-10 fine-tuned checkpoint, then switched to eval mode.
model = timm.create_model("timm/vit_base_patch16_224.orig_in21k_ft_in1k", pretrained=False)
model.head = nn.Linear(model.head.in_features, 10)
state_dict = torch.load('/home/leewlving/.cache/torch/hub/checkpoints/vit_base_patch16_224_in21k_ft_cifar10.pth')
model.load_state_dict(state_dict)
model.eval()
|
|
||||||
|
|
||||||
# ImageNet normalisation statistics.
DEFAULT_MEAN = (0.485, 0.456, 0.406)
DEFAULT_STD = (0.229, 0.224, 0.225)

# Deterministic eval-style preprocessing; interpolation=3 is PIL bicubic
# (presumably chosen to match the ViT checkpoint's preprocessing — verify).
transform = transforms.Compose([
    transforms.Resize(256, interpolation=3),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(DEFAULT_MEAN, DEFAULT_STD),
])
|
|
||||||
|
|
||||||
|
|
||||||
class CIFAR10_dataset(Dataset):
    """In-memory image dataset over float arrays in [0, 1], CHW layout.

    Each item is converted back to a HWC uint8 PIL image so standard
    torchvision transforms can be applied on access. A transform callable
    is required at item access time (passing None would fail in
    __getitem__).
    """

    def __init__(self, data, targets, transform=None):
        self.data = data
        self.targets = torch.LongTensor(targets)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # CHW float [0, 1] -> HWC uint8 PIL image for the transform pipeline.
        arr = (self.data[index] * 255).round().astype(np.uint8).transpose(1, 2, 0)
        img = self.transform(Image.fromarray(arr))
        return img, self.targets[index]
|
|
||||||
|
|
||||||
|
|
||||||
# SVHN train/test splits, preprocessed with the ImageNet-style pipeline.
train_dataset = torchvision.datasets.SVHN(root='./svhn', split='train', download=True, transform=transform)
test_dataset = torchvision.datasets.SVHN(root='./svhn', split='test', download=True, transform=transform)
dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False)

opt = torch.optim.Adam(model.parameters(), lr=0.01)
|
|
||||||
|
|
||||||
|
|
||||||
criterion = nn.CrossEntropyLoss()

# ART wrapper around the PyTorch model.
# NOTE(review): clip_values=(0, 1) assumes raw pixel inputs, but the
# transform above normalises images (values leave [0, 1]); confirm the
# intended clipping range.
classifier = PyTorchClassifier(
    model=model,
    clip_values=(0.0, 1.0),
    loss=criterion,
    optimizer=opt,
    input_shape=(3, 224, 224),
    nb_classes=10,
)
|
|
||||||
|
|
||||||
|
|
||||||
# Untargeted L-inf PGD (eps = 8/255, 10 iterations) for adversarial training.
attack = ProjectedGradientDescent(
    classifier,
    norm=np.inf,
    eps=8.0 / 255.0,
    eps_step=2.0 / 255.0,
    max_iter=10,
    targeted=False,
    num_random_init=1,
    batch_size=64,
    verbose=False,
)

trainer = AdversarialTrainer(classifier, attack)
|
|
||||||
# Feed the training dataloader to ART and run one epoch of PGD-AT.
art_datagen = PyTorchDataGenerator(iterator=dataloader, size=len(train_dataset), batch_size=64)

trainer.fit_generator(art_datagen, nb_epochs=1)

# Persist the adversarially trained weights.
torch.save(trainer.classifier.model.state_dict(), 'AT-svhn.pth')
|
|
||||||
|
|
||||||
Loading…
Reference in New Issue