[Feat] Add main attack codes.
This commit is contained in:
parent
55bc6718c1
commit
f898f565b3
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,361 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "310b21b9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### <b>Installation for the StyleGAN Encoder Network</b>\n",
|
||||
"\n",
|
||||
"* Download the pixel2style2pixel (pSp) source code.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"!git clone https://github.com/eladrich/pixel2style2pixel.git pixel2style2pixel\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Ninja is required to load c++ extensions in the pSp encoder.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"!wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip\n",
|
||||
"!sudo unzip ninja-linux.zip -d /usr/local/bin/\n",
|
||||
"!sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download the pre-trained pSp model.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Make a folder that contains pre-trained models\n",
|
||||
"save_path = \"./pretrained_models\"\n",
|
||||
"if not os.path.exists(save_path):\n",
|
||||
" os.makedirs(save_path)\n",
|
||||
"\n",
|
||||
"# Download the pre-trained pSp model.\n",
|
||||
"file_path = os.path.join(save_path, \"psp_ffhq_encode.pt\")\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/EZDa4b7PITZCqrCcVVFrN4UBrFYkZmENV0uFQf1fE5gR5Q?download=1 -O $file_path\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* (Optional) If libcudart.so is not found, you may have to set environment variables.\n",
|
||||
" * Then, you may have to run the jupyter-notebook with \"allow-root\" option.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"# The example script:\n",
|
||||
"export PATH=$PATH:/usr/local/cuda-10.0/bin\n",
|
||||
"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-10.0/lib64\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download the face image alignment model.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"!wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\n",
|
||||
"!bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download example images for the inversion.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/ETkdz1PCJ_NAtj4NUdiQCVsBv2UpVamnhm-MUcD59NN-GA?download=1 -O inversion_images.zip\n",
|
||||
"!unzip inversion_images.zip\n",
|
||||
"</pre>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "3901e999",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### <b>Installation for the Classification Network</b>\n",
|
||||
"\n",
|
||||
"* Download the classification model.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/EX15A0wm8MBLrBsT-9ARA-gBZ6W-RwmSw1IgYZzan4dELg?download=1 -O facial_identity_classification_using_transfer_learning_with_ResNet18_resolution_256_normalize_05.pth\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download the Celeb-HQ facial identity recognition dataset.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"%%capture\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/EcWlrJ2qjtRJtQVIwR1kW2EBTj8le3PdXI4TYsKErsGMcA?download=1 -O CelebA_HQ_facial_identity_dataset_test.zip\n",
|
||||
"!unzip CelebA_HQ_facial_identity_dataset_test.zip -d ./CelebA_HQ_facial_identity_dataset_test\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download the corrected facial identity dataset.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"%%capture\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/EQIF7ZqRxDJCjTwWiO1xPe4BqpenC93AEpTnRpSOlrPl5g?download=1 -O corrected_facial_identity_images_ResNet18.zip\n",
|
||||
"!unzip corrected_facial_identity_images_ResNet18.zip -d ./corrected_facial_identity_images_ResNet18\n",
|
||||
"</pre>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "99406100",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### <b>Installation for Computing Metrics</b>\n",
|
||||
"\n",
|
||||
"* Download the pre-trained CurricularFace model.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Make a folder that contains pre-trained models\n",
|
||||
"save_path = \"./pretrained_models\"\n",
|
||||
"if not os.path.exists(save_path):\n",
|
||||
" os.makedirs(save_path)\n",
|
||||
"\n",
|
||||
"# Download the pre-trained CurricularFace model.\n",
|
||||
"file_path = os.path.join(save_path, \"CurricularFace_Backbone.pth\")\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/Eaaz-dJcRj1GspN39J3-5GkBbzkOZL6dvXD2MsVwOvwQsg?download=1 -O $file_path\n",
|
||||
"</pre>\n",
|
||||
"\n",
|
||||
"* Download the pre-trained MTCNN model.\n",
|
||||
"\n",
|
||||
"<pre>\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# Make a folder that contains pre-trained models\n",
|
||||
"save_path = \"./pretrained_models\"\n",
|
||||
"if not os.path.exists(save_path):\n",
|
||||
" os.makedirs(save_path)\n",
|
||||
"\n",
|
||||
"# Download the pre-trained MTCNN model.\n",
|
||||
"file_path = os.path.join(save_path, \"mtcnn.tar.gz\")\n",
|
||||
"!wget https://postechackr-my.sharepoint.com/:u:/g/personal/dongbinna_postech_ac_kr/EdSEMj6JF7VEpXo296X6fekB0uvcyMRH8LkUj15VafAuJA?download=1 -O $file_path\n",
|
||||
"!tar -zxvf $file_path -C $save_path\n",
|
||||
"</pre>"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "19ebf398",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
import os

# Select the GPU before importing torch so CUDA only exposes device 3.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

from argparse import Namespace
import time
import sys
import pprint
import random
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from IPython.display import display
import torch
import torch.nn as nn
import torchvision
# NOTE: `transforms` was previously imported twice (as
# `torchvision.transforms` and via this line); one import is enough.
from torchvision import datasets, models, transforms

# Make the pSp (pixel2style2pixel) repository importable.
sys.path.append("./pixel2style2pixel")

import dlib
from scripts.align_all_parallel import align_face
from datasets import augmentations
from utils.common import tensor2im, log_input_image

from models.mtcnn.mtcnn import MTCNN
from models.encoders.model_irse import IR_101
from configs.paths_config import model_paths
from criteria.lpips.lpips import LPIPS
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "29796d7e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Identity-similarity metric: a CurricularFace (IR-101) backbone whose
# embeddings are compared by inner product.
facenet_weights = "./pretrained_models/CurricularFace_Backbone.pth"
facenet = IR_101(input_size=112)
facenet.load_state_dict(torch.load(facenet_weights))
facenet.cuda()
facenet.eval()

# Face detector used alongside the identity metric.
mtcnn = MTCNN()

# Perceptual-distance (LPIPS) metric with an AlexNet backbone.
lpips_loss = LPIPS(net_type='alex')
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8f1b99bd",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Preprocessing matched to the networks above: resize to 256x256 and map
# pixel values to [-1, 1] (mean 0.5, std 0.5 per channel).
img_transforms = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

def image_loader(image_name):
    """Load an image file and return a (1, 3, 256, 256) CUDA tensor in [-1, 1].

    The image is forced to RGB so that grayscale or RGBA inputs do not break
    the 3-channel Normalize transform above.
    """
    image = Image.open(image_name).convert("RGB")
    image = img_transforms(image)
    image = image.unsqueeze(0)  # add batch dimension
    return image.cuda()
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "27f2e9a4",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Evaluate the attack results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "7f7977cf",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Folder containing one sub-directory per attacked image; each sub-directory
# holds a `log` file and the intermediate result images (`100.png`, ...,
# `20000.png`, `encoded_source.png`).
# TODO: point this at your own attack-results directory before running.
ATTACK_RESULT_FOLDER = "./attack_results"
root = ATTACK_RESULT_FOLDER
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c518edb7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
import os

# Query counts at which each attack logs metrics and saves an image; the last
# 12 lines of every `log` file correspond to these 12 checkpoints.
CHECKPOINTS = [100, 200, 300, 500, 1000, 2000, 3000, 5000, 8000, 10000, 15000, 20000]

# Collect every directory whose attack ran the full 20000 queries.
all_directory = []
for filename in os.listdir(root):
    log_file = os.path.join(root, filename, 'log')
    with open(log_file, 'r') as f:
        log = f.readlines()
    final_line = log[-1].strip()
    if final_line.split(' ')[0] == '20000':
        all_directory.append(filename)

print(len(all_directory), 'logs are available')

all_mses = []     # per-directory MSE values at each checkpoint (as strings)
queries = []      # checkpoint query counts read back from the logs
all_simils = []   # per-directory identity-similarity scores at each checkpoint
all_lpipses = []  # per-directory LPIPS scores at each checkpoint

for cnt, directory in enumerate(all_directory, start=1):
    log_file = os.path.join(root, directory, 'log')
    with open(log_file, 'r') as f:
        log = f.readlines()

    # Parse the 12 checkpoint lines at the tail of the log.
    mses = []
    for line in log[len(log) - 12:]:
        query, mse, linf, simil, lpips = line.strip().split()
        mses.append(mse)
        queries.append(query)
    all_mses.append(mses)

    # The encoded source image (and its embedding) is the same for every
    # checkpoint, so load and embed it once per directory, not per checkpoint.
    source = image_loader(os.path.join(root, directory, 'encoded_source.png'))
    s = facenet(source)

    simils = []
    lpipses = []
    for i in CHECKPOINTS:
        current_target = image_loader(os.path.join(root, directory, str(i) + '.png'))
        t = facenet(current_target)
        # Identity similarity: inner product of the two embeddings.
        simil = torch.bmm(t.unsqueeze(1), s.unsqueeze(2))
        simils.append(simil[0][0].item())  # similarity score
        lpipses.append(lpips_loss(current_target, source).item())  # LPIPS score

    all_simils.append(simils)
    all_lpipses.append(lpipses)
    print(cnt)  # progress indicator

# Average each metric over all finished attacks, per checkpoint.
num_runs = len(all_mses)
for i in range(12):
    mean_mse = sum(float(m[i]) for m in all_mses) / num_runs
    mean_simil = sum(m[i] for m in all_simils) / num_runs
    mean_lpips = sum(m[i] for m in all_lpipses) / num_runs
    print(queries[i], mean_mse, mean_simil, mean_lpips)
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9720b4de",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Evaluate the real-world attack results"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cbf563d0",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Paths to the final attacked image and the original (source) image for the
# real-world evaluation below.
# TODO: set these to your own files before running.
ATTACK_RESULT = "./attack_results/example/20000.png"
SOURCE_IMAGE = "./attack_results/example/encoded_source.png"

attack_target = ATTACK_RESULT
attack_source = SOURCE_IMAGE
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "4e2ebfdc",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
# Evaluate one real-world attack pair: identity similarity and LPIPS distance.
current_target, source = image_loader(attack_target), image_loader(attack_source)

# CurricularFace embeddings for the attacked image and the source image.
t, s = facenet(current_target), facenet(source)

# Identity similarity: batched inner product between the two embeddings.
identity_similarity = torch.bmm(t.unsqueeze(1), s.unsqueeze(2))
print(identity_similarity[0][0].item())
print(lpips_loss(current_target, source).item())
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.9"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Loading…
Reference in New Issue