Compare commits

...

9 Commits

Author SHA1 Message Date
leewlving 5b84447a3d add 2024-06-21 10:12:04 +08:00
leewlving 9539ba5aed change mat to json 2024-06-19 15:28:40 +08:00
leewlving a0b9e70922 locate 2024-06-18 23:01:06 +08:00
leewlving 9170cbd6e9 get clean embedding from bert 2024-06-18 22:55:03 +08:00
leewlving 73c901a18c new update 2 modality 2024-06-18 22:40:19 +08:00
leewlving 053a58b07a update i2t and t2i 2024-06-18 22:19:25 +08:00
leewlving cb449df1c5 use clip decoder 2024-06-18 21:28:14 +08:00
leewlving 312671bdc7 change text tokenizer to bert 2024-06-18 20:27:29 +08:00
leewlving d35f23c5ed add text attack 2024-06-18 17:55:35 +08:00
11 changed files with 1069 additions and 265 deletions

View File

@ -9,6 +9,11 @@ import random
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from model.simple_tokenizer import SimpleTokenizer as Tokenizer
import re
import os
import numpy as np
from torchvision import transforms
class BaseDataset(Dataset):
@ -61,7 +66,28 @@ class BaseDataset(Dataset):
return image
def pre_caption(self, caption):
caption = re.sub(
r"([,.'!?\"()*#:;~])",
'',
caption.lower(),
).replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
caption = re.sub(
r"\s{2,}",
' ',
caption,
)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
caption_words = caption.split(' ')
if len(caption_words)>self.maxWords:
caption = ' '.join(caption_words[:self.maxWords])
return caption
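For reference, a standalone sketch of the same cleaning steps (the sample caption and the maxWords value below are illustrative, not taken from the dataset):
# illustrative sketch of pre_caption's behaviour, assuming maxWords=32
import re
def clean(caption, max_words=32):
    caption = re.sub(r"([,.'!?\"()*#:;~])", '', caption.lower())
    caption = caption.replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
    caption = re.sub(r"\s{2,}", ' ', caption).strip()
    return ' '.join(caption.split(' ')[:max_words])
# clean("A man's DOG, running-fast!")  ->  "a mans dog running fast"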
def _load_text(self, index: int):
# print(self.captions[index])
# captions = self.pre_caption(self.captions[index])
# captions =whitespace_clean(self.captions[index]).lower()
captions = self.captions[index]
use_cap = captions[random.randint(0, len(captions) - 1)]
@ -78,7 +104,7 @@ class BaseDataset(Dataset):
caption.append(0)
caption = torch.tensor(caption)
return caption
return captions
def _load_label(self, index: int) -> torch.Tensor:
label = self.labels[index]
@ -100,3 +126,175 @@ class BaseDataset(Dataset):
return image, caption, label, index
# def default_loader(path):
# return Image.open(path).convert('RGB')
# class IaprDataset(Dataset):
# def __init__(self, args,txt,transform=None, loader=default_loader):
# self.transform = transform
# self.loader = loader
# name_label = []
# for line in open(txt):
# line = line.strip('\n').split()
# label = list(map(int, np.array(line[len(line)-255:]))) # the last 255 binary codes are the label; the first 2912 are the bag-of-words binary encoding of the words
# tem = re.split('[/.]', line[0])
# file_name, sample_name = tem[0], tem[1]
# name_label.append([file_name, sample_name, label])
# # # print('label = ', label)
# # print('file_name = %s, sample_name = %s' %(file_name, sample_name))
# # label_list = np.where(label=='1')
# # print('label_list = ', label_list)
# self.name_label = name_label
# self.image_dir=args.image_dir
# self.text_dir = args.text_dir
# def __getitem__(self, index):
# words = self.name_label[index] # words = [file_name, sample_name, label]
# # print('words = ', words[0:2])
# img_path = os.path.join(self.image_dir, words[0], words[1]+'.jpg')
# text_path = os.path.join(self.text_dir, words[0], words[1]+'.txt')
# # img
# img = self.loader(img_path)
# if self.transform is not None:
# img = self.transform(img)
# # text
# text = 'None'
# for line in open(text_path):
# text = '[CLS]' + line + '[SEP]'
# # label
# label = torch.LongTensor(words[2])
# # image, caption, label, index
# return img, text, label, index
# def __len__(self):
# return len(self.name_label)
default_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
class MScocoDataset(Dataset):
def __init__(self, data_path, img_filename, text_filename, label_filename, transform=None):
self.data_path = data_path
if transform is None:
self.transform = default_transform
else:
self.transform = transform
img_filepath = os.path.join(data_path, img_filename)
with open(img_filepath, 'r') as f:
self.imgs = [x.strip() for x in f]
text_filepath = os.path.join(data_path, text_filename)
with open(text_filepath, 'r') as f:
self.texts = f.readlines()
self.texts = [i.replace('\n', '') for i in self.texts]
label_filepath = os.path.join(data_path, label_filename)
self.labels = np.genfromtxt(label_filepath, dtype=np.int32)
def __getitem__(self, index):
img = Image.open(os.path.join(self.data_path, self.imgs[index]))
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = torch.from_numpy(self.labels[index]).float()
text = self.texts[index]
# image, caption, label, index
return img, text, label, index
def __len__(self):
return len(self.imgs)
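A minimal usage sketch for MScocoDataset; the data path is a placeholder and the list-file names are the ones returned by get_dataset_filename('train') in dataset/dataloader.py below:
# sketch only; '/data/coco' is an assumed location
from torch.utils.data import DataLoader
train_set = MScocoDataset(data_path='/data/coco',
                          img_filename='cm_train_imgs.txt',
                          text_filename='cm_train_txts.txt',
                          label_filename='cm_train_labels.txt')
loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)
img, text, label, index = next(iter(loader))  # image tensor, list of raw caption strings, labels, indices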
class MirflickrDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
file_path = os.path.join(root_dir, "mirflickr25k_annotations_v080")
file_list = os.listdir(file_path)
file_list = [item for item in file_list if "_r1" not in item and "README" not in item]
self.class_index = {}
for i, item in enumerate(file_list):
self.class_index.update({item: i})
self.label_dict = {}
for path_id in file_list:
path = os.path.join(file_path, path_id)
with open(path, "r") as f:
for item in f:
item = item.strip()
if item not in self.label_dict:
label = np.zeros(len(file_list))
label[self.class_index[path_id]] = 1
self.label_dict.update({item: label})
else:
# print()
self.label_dict[item][self.class_index[path_id]] = 1
self.captions_dict = {}
captions_path = os.path.join(root_dir, "mirflickr/meta/tags")
captions_list = os.listdir(captions_path)
for item in captions_list:
id_ = item.split(".")[0].replace("tags", "")
caption = ""
with open(os.path.join(captions_path, item), "r") as f:
for word in f.readlines():
caption += word.strip() + " "
caption = caption.strip()
self.captions_dict.update({id_: caption})
if transform is None:
self.transform = default_transform
else:
self.transform = transform
def __getitem__(self, index):
label=self.label_dict[index]
PATH = os.path.join(self.root_dir, "mirflickr")
img=Image.open(os.path.join(PATH, "im" + index + ".jpg"))
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
# label = torch.from_numpy(self.labels[index]).float()
text = self.captions_dict[index]
# image, caption, label, index
return img, text, label, index
def __len__(self):
return len(list(self.label_dict.keys()))
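Note that MirflickrDataset.__getitem__ is keyed by the string image id taken from the annotation files (it builds the image path as "im" + index + ".jpg"), not by an integer position; a small sanity-check sketch with the root path assumed:
# sketch only; the root directory is a placeholder
ds = MirflickrDataset(root_dir='/data/mirflickr25k')
some_id = next(iter(ds.label_dict))      # an image id string such as "12"
img, text, label, idx = ds[some_id]      # loads mirflickr/im<id>.jpg and its tag string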
class NusWideDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
# note: a leading '/' in the second argument would make os.path.join discard root_dir
imageListFile = os.path.join(root_dir, "Low-Level-Features/ImageList/Imagelist.txt")
labelPath = os.path.join(root_dir, "nuswide/Groundtruth/AllLabels")
textFile = os.path.join(root_dir, "Low-Level-Features/NUS_WID_Tags/All_Tags.txt")
def __getitem__(self, index):
label=self.label_dict[index]
PATH = os.path.join(self.root_dir, "mirflickr")
img=Image.open(os.path.join(PATH, "im" + index + ".jpg"))
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
# label = torch.from_numpy(self.labels[index]).float()
text = self.captions_dict[index]
# image, caption, label, index
return img, text, label, index
def __len__(self):
return len(list(self.label_dict.keys()))

View File

@ -1,8 +1,12 @@
from .base import BaseDataset
from .base import BaseDataset, MirflickrDataset, MScocoDataset , IaprDataset
import os
import numpy as np
import scipy.io as scio
import torch
import torchvision.transforms as transforms
import json
from torch.utils.data import Dataset, DataLoader
def split_data(captions, indexs, labels, query_num=5000, train_num=10000, seed=None):
@ -45,8 +49,13 @@ def dataloader(captionFile: str,
with open(captionFile, "r") as f:
captions = f.readlines()
captions = np.asarray([[item.strip()] for item in captions])
elif captionFile.endswith("json"):
with open(captionFile, "r") as f:
data = json.load(f)
captions=data["caption"]
# captions = captions[0] if captions.shape[0] == 1 else captions
else:
raise ValueError("the format of 'captionFile' doesn't support, only support [txt, mat] format.")
raise ValueError("the format of 'captionFile' doesn't support, only support [txt, json, mat] format.")
if not npy:
indexs = scio.loadmat(indexFile)["index"]
else:
@ -65,4 +74,55 @@ def dataloader(captionFile: str,
return train_data, query_data, retrieval_data
def get_dataset_filename(split):
filename = {
'train': ('cm_train_imgs.txt', 'cm_train_txts.txt', 'cm_train_labels.txt'),
'test': ('cm_test_imgs.txt', 'cm_test_txts.txt', 'cm_test_labels.txt'),
'db': ('cm_database_imgs.txt', 'cm_database_txts.txt', 'cm_database_labels.txt')
}
return filename[split]
def cross_modal_dataset(args ):
transform_test = transforms.Compose([
transforms.Resize((args.resolution, args.resolution)),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
])
if args.dataset == 'flickr25k':
dataset=MirflickrDataset(root_dir=args.flickr25k_root)
elif args.dataset == 'coco':
img_name, text_name, label_name = get_dataset_filename('train')
dataset=MScocoDataset(data_path=args.coco_root,img_filename=args.coco_img_root,
text_filename=args.coco_txt_root,label_filename=args.coco_label_root,transform=transform_test)
elif args.dataset == 'iapr':
train_file = os.path.join(args.iapr_root, 'iapr_train')
test_file = os.path.join(args.iapr_root, 'iapr_test')
retrieval_file = os.path.join(args.iapr_root, 'iapr_retrieval')
train_set=IaprDataset(args,txt=train_file, transform=transform_test)
test_set =IaprDataset(args,txt=test_file, transform=transform_test)
db_set=IaprDataset(args,txt=retrieval_file, transform=transform_test)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=4)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=4)
db_loader = DataLoader(db_set, batch_size=args.batch_size, shuffle=False, num_workers=4)
else:
raise ValueError("Not support.")
# if args.dataset == 'iapr':
# elif args.dataset == 'coco':
# img_name, text_name, label_name = get_dataset_filename('train')
# pass
# else:
# test_set, db_set=torch.utils.data.random_split(dataset,[0.25, 0.75])
# train_set,_=torch.utils.data.random_split(dataset,[0.65, 0.35])
# train_loader = Dataset.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=4)
# test_loader = Dataset.DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=4)
# db_loader = Dataset.DataLoader(db_set, batch_size=args.batch_size, shuffle=False, num_workers=4)
return train_loader, test_loader, db_loader , train_set, test_set, db_set
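A hedged call sketch for cross_modal_dataset; args comes from utils.get_args() (shown further down), the iapr root path is a placeholder, and note that IaprDataset is still commented out in dataset/base.py at this commit, so the 'iapr' branch assumes it has been re-enabled:
# sketch only; paths are assumptions and only the 'iapr' branch builds loaders here
from utils import get_args
args = get_args()
args.dataset = 'iapr'
args.iapr_root = '/data/iapr'   # assumed attribute/location
(train_loader, test_loader, db_loader,
 train_set, test_set, db_set) = cross_modal_dataset(args)
for img, text, label, index in train_loader:
    break   # image tensor, raw text string, label tensor, sample index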

View File

@ -1,14 +1,15 @@
import os
import scipy.io as scio
import numpy as np
import json
# mirflickr25k_annotations_v080 and mirflickr
# mkdir mat
# mv make_mirflickr25k.py mat
# python make_mirflickr25k.py
root_dir = "PATH/TO/YOUR/DOWNLOAD/DIR/"
root_dir = "/home/leewlving/Downloads/FireShot/mirflickr25k/"
file_path = os.path.join(root_dir, "/mirflickr25k_annotations_v080")
file_path = os.path.join(root_dir, "mirflickr25k_annotations_v080")
file_list = os.listdir(file_path)
@ -52,7 +53,7 @@ print("index created:", len(index))
index= {"index": index}
captions_path = os.path.join(root_dir, "/mirflickr/meta/tags")
captions_path = os.path.join(root_dir, "mirflickr/meta/tags")
captions_list = os.listdir(captions_path)
captions_dict = {}
for item in captions_list:
@ -72,9 +73,10 @@ for item in keys:
print("captions created:", len(captions))
captions = {"caption": captions}
scio.savemat(os.path.join(root_dir, "/mat/index.mat"), index)
scio.savemat(os.path.join(root_dir, "/mat/caption.mat"), captions)
scio.savemat(os.path.join(root_dir, "/mat/label.mat"), labels)
scio.savemat(os.path.join(root_dir, "mat/index.mat"), index)
with open(os.path.join(root_dir, "mat/caption.json"), 'w', encoding='utf-8') as f:
json.dump(captions, f, ensure_ascii=False)
scio.savemat(os.path.join(root_dir, "mat/label.mat"), labels)

View File

@ -1,4 +1,4 @@
from train.hash_train import Trainer
from train.text_train import Trainer
if __name__ == "__main__":

model/bert_tokenizer.py Normal file (+543 lines)
View File

@ -0,0 +1,543 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Bert."""
import collections
import os
import unicodedata
from typing import List, Optional, Tuple
from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from transformers.utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt",
"bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
"bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt",
"TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt",
"TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt",
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(PreTrainedTokenizer):
r"""
Construct a BERT tokenizer. Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to do basic tokenization before WordPiece.
never_split (:obj:`Iterable`, `optional`):
Collection of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this `issue
<https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
do_lower_case=do_lower_case,
do_basic_tokenize=do_basic_tokenize,
never_split=never_split,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def do_lower_case(self):
return self.basic_tokenizer.do_lower_case
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
return self.vocab.get(token, self.vocab.get(self.unk_token))
# def _convert_tokens_to_ids(self, tokens):
# """ Converts a token (str) in an id using the vocab. """
# return [self._convert_token_to_id(token) for token in tokens]
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
""" Converts a sequence of tokens (string) in a single string. """
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: ``[CLS] X ``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(vocab_file)
)
index = token_index
writer.write(token + "\n")
index += 1
return (vocab_file,)
class BasicTokenizer(object):
"""
Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
never_split (:obj:`Iterable`, `optional`):
Collection of tokens which will never be split during tokenization. Only has an effect when
:obj:`do_basic_tokenize=True`
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this `issue
<https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
"""
def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see
WordPieceTokenizer.
Args:
**never_split**: (`optional`) list of str
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
:func:`PreTrainedTokenizer.tokenize`) List of token not to split.
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example, :obj:`input = "unaffable"` wil return as output :obj:`["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
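A short usage sketch for this tokenizer; the vocabulary path is a placeholder and the WordPiece split shown is the one from the tokenize() docstring above. train/text_train.py below loads the same class with BertTokenizer.from_pretrained(args.text_encoder) instead, resolving the vocabulary from the pretrained map at the top of this file.
# sketch only; "vocab.txt" must be a real BERT vocabulary file
tokenizer = BertTokenizer(vocab_file="vocab.txt", do_lower_case=True)
tokens = tokenizer.tokenize("unaffable")      # e.g. ["un", "##aff", "##able"] per the docstring
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(ids)))  # "unaffable"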

View File

@ -9,6 +9,8 @@ from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer

model/model.py Executable file → Normal file (0 lines changed)
View File

View File

@ -15,9 +15,11 @@ from utils import get_args, calc_neighbor, cosine_similarity, euclidean_similari
from utils.calc_utils import cal_map, cal_pr
from dataset.dataloader import dataloader
import clip
from model.simple_tokenizer import SimpleTokenizer as Tokenizer
# from transformers import BertModel
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# tokenizer=Tokenizer()
def clamp(delta, clean_imgs):
@ -95,6 +97,8 @@ class Trainer(TrainBase):
label_train=[]
for image, text, label, index in self.train_loader:
text=text.to(device, non_blocking=True)
text=clip.tokenize(text)
# print(self.model.vocab_size)
temp_text=self.model.encode_text(text)
text_train.append(temp_text.cpu().detach().numpy())
@ -112,21 +116,19 @@ class Trainer(TrainBase):
text_var_representation[str(centroid.astype(int))]= text_var[i]
return text_representation, text_var_representation
def target_adv(self, image, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var,
beta=10 ,epsilon=0.03125, alpha=3/255, num_iter=1500, temperature=0.05):
def target_adv(self, image, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var
,epsilon=0.03125, alpha=3/255):
delta = torch.zeros_like(image,requires_grad=True)
# one=torch.zeros_like(positive)
# alienation_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7)
for i in range(num_iter):
for i in range(self.args.epochs):
self.model.zero_grad()
anchor=self.model.encode_image(image+delta)
loss1=F.triplet_margin_with_distance_loss(anchor, positive_code,negetive_code, distance_function=nn.CosineSimilarity())
negative_dist=(anchor-negetive_mean)**2 / negative_var
positive_dist=(anchor-positive_mean)**2 /positive_var
negatives=torch.exp(negative_dist / temperature)
positives= torch.exp(positive_dist / temperature)
loss= torch.log(positives/(positives+negatives)).mean() + beta* loss1
negatives=torch.exp(negative_dist / self.args.temperature)
positives= torch.exp(positive_dist / self.args.temperature)
loss= torch.log(positives/(positives+negatives)).mean() + self.args.beta* loss1
loss.backward(retain_graph=True)
delta.data = delta - alpha * delta.grad.detach().sign()
delta.data =clamp(delta, image).clamp(-epsilon, epsilon)
@ -136,7 +138,7 @@ class Trainer(TrainBase):
def train_epoch(self):
self.change_state(mode="valid")
save_dir = os.path.join(self.args.save_dir, "adv_PR_cruve")
save_dir = os.path.join(self.args.save_dir, "adv_PR_i2t")
all_loss = 0
times = 0
adv_codes=[]
@ -192,7 +194,6 @@ class Trainer(TrainBase):
'r_txt': retrieval_txt,
'adv_l': adv_labels,
'r_l': retrieval_labels
# 'q_l':query_labels
# 'pr': pr,
# 'pr_t': pr_t
}
@ -204,28 +205,9 @@ class Trainer(TrainBase):
def train(self):
self.logger.info("Start train.")
for epoch in range(self.args.epochs):
self.train_epoch(epoch)
self.valid(epoch)
self.save_model(epoch)
self.logger.info(f">>>>>>> FINISHED >>>>>> Best epoch, I-T: {self.best_epoch_i}, mAP: {self.max_mapi2t}, T-I: {self.best_epoch_t}, mAP: {self.max_mapt2i}")
def make_hash_code(self, code: list) -> torch.Tensor:
code = torch.stack(code)
# print(code.shape)
code = code.permute(1, 0, 2)
hash_code = torch.argmax(code, dim=-1)
hash_code[torch.where(hash_code == 0)] = -1
hash_code = hash_code.float()
return hash_code
def get_code(self, data_loader, length: int):
@ -247,9 +229,7 @@ class Trainer(TrainBase):
def valid_attack(self,adv_images, texts, adv_labels):
save_dir = os.path.join(self.args.save_dir, "adv_PR_cruve")
os.makedirs(save_dir, exist_ok=True)
@ -287,49 +267,6 @@ class Trainer(TrainBase):
self.logger.info(">>>>>> save all data!")
# def valid(self, epoch):
# self.logger.info("Valid.")
# self.change_state(mode="valid")
# query_img, query_txt = self.get_code(self.query_loader, self.args.query_num) if self.args.hash_layer == "select" else super().get_code(self.query_loader, self.args.query_num)
# retrieval_img, retrieval_txt = self.get_code(self.retrieval_loader, self.args.retrieval_num) if self.args.hash_layer == "select" else super().get_code(self.retrieval_loader, self.args.retrieval_num)
# # print("get all code")
# mAPi2t = calc_map_k(query_img, retrieval_txt, self.query_labels, self.retrieval_labels, None, self.rank)
# # print("map map")
# mAPt2i = calc_map_k(query_txt, retrieval_img, self.query_labels, self.retrieval_labels, None, self.rank)
# mAPi2i = calc_map_k(query_img, retrieval_img, self.query_labels, self.retrieval_labels, None, self.rank)
# mAPt2t = calc_map_k(query_txt, retrieval_txt, self.query_labels, self.retrieval_labels, None, self.rank)
# if self.max_mapi2t < mAPi2t:
# self.best_epoch_i = epoch
# self.save_mat(query_img, query_txt, retrieval_img, retrieval_txt, mode_name="i2t")
# self.max_mapi2t = max(self.max_mapi2t, mAPi2t)
# if self.max_mapt2i < mAPt2i:
# self.best_epoch_t = epoch
# self.save_mat(query_img, query_txt, retrieval_img, retrieval_txt, mode_name="t2i")
# self.max_mapt2i = max(self.max_mapt2i, mAPt2i)
# self.logger.info(f">>>>>> [{epoch}/{self.args.epochs}], MAP(i->t): {mAPi2t}, MAP(t->i): {mAPt2i}, MAP(t->t): {mAPt2t}, MAP(i->i): {mAPi2i}, \
# MAX MAP(i->t): {self.max_mapi2t}, MAX MAP(t->i): {self.max_mapt2i}")
def save_mat(self, query_img, query_txt, retrieval_img, retrieval_txt, mode_name="i2t"):
save_dir = os.path.join(self.args.save_dir, "PR_cruve")
os.makedirs(save_dir, exist_ok=True)
query_img = query_img.cpu().detach().numpy()
query_txt = query_txt.cpu().detach().numpy()
retrieval_img = retrieval_img.cpu().detach().numpy()
retrieval_txt = retrieval_txt.cpu().detach().numpy()
query_labels = self.query_labels.numpy()
retrieval_labels = self.retrieval_labels.numpy()
result_dict = {
'q_img': query_img,
'q_txt': query_txt,
'r_img': retrieval_img,
'r_txt': retrieval_txt,
'q_l': query_labels,
'r_l': retrieval_labels
}
scio.savemat(os.path.join(save_dir, str(self.args.output_dim) + "-ours-" + self.args.dataset + "-" + mode_name + ".mat"), result_dict)
self.logger.info(f">>>>>> save best {mode_name} data!")

View File

@ -13,18 +13,119 @@ from .base import TrainBase
from torch.nn import functional as F
from utils import get_args, calc_neighbor, cosine_similarity, euclidean_similarity,find_indices
from utils.calc_utils import cal_map, cal_pr
from dataset.dataloader import dataloader
from dataset.dataloader import cross_modal_dataset
import clip
import copy
from model.bert_tokenizer import BertTokenizer
from model.simple_tokenizer import SimpleTokenizer as Tokenizer
from transformers import BertForMaskedLM
# from transformers import BertModel
import ftfy
import regex as re
import html
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def clamp(delta, clean_imgs):
filter_words = ['a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost',
'alone', 'along', 'already', 'also', 'although', 'am', 'among', 'amongst', 'an', 'and', 'another',
'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', "aren't", 'around', 'as',
'at', 'back', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides',
'between', 'beyond', 'both', 'but', 'by', 'can', 'cannot', 'could', 'couldn', "couldn't", 'd', 'didn',
"didn't", 'doesn', "doesn't", 'don', "don't", 'down', 'due', 'during', 'either', 'else', 'elsewhere',
'empty', 'enough', 'even', 'ever', 'everyone', 'everything', 'everywhere', 'except', 'first', 'for',
'former', 'formerly', 'from', 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'he', 'hence',
'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
'how', 'however', 'hundred', 'i', 'if', 'in', 'indeed', 'into', 'is', 'isn', "isn't", 'it', "it's",
'its', 'itself', 'just', 'latter', 'latterly', 'least', 'll', 'may', 'me', 'meanwhile', 'mightn',
"mightn't", 'mine', 'more', 'moreover', 'most', 'mostly', 'must', 'mustn', "mustn't", 'my', 'myself',
'namely', 'needn', "needn't", 'neither', 'never', 'nevertheless', 'next', 'no', 'nobody', 'none',
'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'on', 'once', 'one', 'only',
'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'per',
'please', 's', 'same', 'shan', "shan't", 'she', "she's", "should've", 'shouldn', "shouldn't", 'somehow',
'something', 'sometime', 'somewhere', 'such', 't', 'than', 'that', "that'll", 'the', 'their', 'theirs',
'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
'thereupon', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'thus', 'to', 'too',
'toward', 'towards', 'under', 'unless', 'until', 'up', 'upon', 'used', 've', 'was', 'wasn', "wasn't",
'we', 'were', 'weren', "weren't", 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while',
'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'with', 'within', 'without', 'won',
"won't", 'would', 'wouldn', "wouldn't", 'y', 'yet', 'you', "you'd", "you'll", "you're", "you've",
'your', 'yours', 'yourself', 'yourselves', '.', '-', 'a the', '/', '?', 'some', '"', ',', 'b', '&', '!',
'@', '%', '^', '*', '(', ')', "-", '-', '+', '=', '<', '>', '|', ':', ";", '', '·']
filter_words = set(filter_words)
clamp_imgs = (delta.data + clean_imgs.data).clamp(0, 1)
clamp_delta = clamp_imgs - clean_imgs.data
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
return clamp_delta
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
def get_bpe_substitues(substitutes, tokenizer, mlm_model):
# substitutes L, k
# device = mlm_model.device
substitutes = substitutes[0:12, 0:4] # maximum BPE candidates
# find all possible candidates
all_substitutes = []
for i in range(substitutes.size(0)):
if len(all_substitutes) == 0:
lev_i = substitutes[i]
all_substitutes = [[int(c)] for c in lev_i]
else:
lev_i = []
for all_sub in all_substitutes:
for j in substitutes[i]:
lev_i.append(all_sub + [int(j)])
all_substitutes = lev_i
# all substitutes list of list of token-id (all candidates)
c_loss = nn.CrossEntropyLoss(reduction='none')
word_list = []
# all_substitutes = all_substitutes[:24]
all_substitutes = torch.tensor(all_substitutes) # [ N, L ]
all_substitutes = all_substitutes[:24].to(device)
# print(substitutes.size(), all_substitutes.size())
N, L = all_substitutes.size()
word_predictions = mlm_model(all_substitutes)[0] # N L vocab-size
ppl = c_loss(word_predictions.view(N * L, -1), all_substitutes.view(-1)) # [ N*L ]
ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1)) # N
_, word_list = torch.sort(ppl)
word_list = [all_substitutes[i] for i in word_list]
final_words = []
for word in word_list:
tokens = [tokenizer._convert_id_to_token(int(i)) for i in word]
text = tokenizer.convert_tokens_to_string(tokens)
final_words.append(text)
return final_words
def get_substitues(substitutes, tokenizer, mlm_model, use_bpe, substitutes_score=None, threshold=3.0):
# substitues L,k
# from this matrix to recover a word
words = []
sub_len, k = substitutes.size() # sub-len, k
if sub_len == 0:
return words
elif sub_len == 1:
for (i, j) in zip(substitutes[0], substitutes_score[0]):
if threshold != 0 and j < threshold:
break
words.append(tokenizer._convert_id_to_token(int(i)))
else:
if use_bpe == 1:
words = get_bpe_substitues(substitutes, tokenizer, mlm_model)
else:
return words
#
# print(words)
return words
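As context for how get_substitues is fed further down in target_adv: the (L, k) substitutes matrix is the per-position top-k slice of the masked-LM logits covering one word's sub-token span. A minimal sketch, with the tokenizer and model handles assumed:
# sketch only; bert_tokenizer is a BertTokenizer, ref_net a BertForMaskedLM
import torch
inputs = bert_tokenizer(["a dog runs on the grass"], return_tensors='pt')
logits = ref_net(inputs.input_ids, attention_mask=inputs.attention_mask).logits   # [1, L, vocab]
scores, preds = torch.topk(logits, k=10, dim=-1)    # per-position top-k scores and candidate ids
span = preds[0, 2:3]                                # sub-token span of one word -> (L, k)
words = get_substitues(span, bert_tokenizer, ref_net, use_bpe=1,
                       substitutes_score=scores[0, 2:3], threshold=3.0)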
class Trainer(TrainBase):
@ -37,6 +138,9 @@ class Trainer(TrainBase):
self.image_mean=image_mean
self.image_var=image_var
self.device=rank
self.clip_tokenizer=Tokenizer()
self.bert_tokenizer=BertTokenizer.from_pretrained(self.args.text_encoder,do_lower_case=True)
self.ref_net = BertForMaskedLM.from_pretrained(self.args.text_encoder)
# self.run()
def _init_model(self):
@ -49,46 +153,10 @@ class Trainer(TrainBase):
def _init_dataset(self):
self.logger.info("init dataset.")
self.logger.info(f"Using {self.args.dataset} dataset.")
self.args.index_file = os.path.join("./dataset", self.args.dataset, self.args.index_file)
self.args.caption_file = os.path.join("./dataset", self.args.dataset, self.args.caption_file)
self.args.label_file = os.path.join("./dataset", self.args.dataset, self.args.label_file)
train_data, query_data, retrieval_data = dataloader(captionFile=self.args.caption_file,
indexFile=self.args.index_file,
labelFile=self.args.label_file,
maxWords=self.args.max_words,
imageResolution=self.args.resolution,
query_num=self.args.query_num,
train_num=self.args.train_num,
seed=self.args.seed)
self.train_labels = train_data.get_all_label()
self.query_labels = query_data.get_all_label()
self.retrieval_labels = retrieval_data.get_all_label()
self.args.retrieval_num = len(self.retrieval_labels)
self.logger.info(f"query shape: {self.query_labels.shape}")
self.logger.info(f"retrieval shape: {self.retrieval_labels.shape}")
self.train_loader = DataLoader(
dataset=train_data,
batch_size=self.args.batch_size,
num_workers=self.args.num_workers,
pin_memory=True,
shuffle=True
)
self.query_loader = DataLoader(
dataset=query_data,
batch_size=self.args.batch_size,
num_workers=self.args.num_workers,
pin_memory=True,
shuffle=True
)
self.retrieval_loader = DataLoader(
dataset=retrieval_data,
batch_size=self.args.batch_size,
num_workers=self.args.num_workers,
pin_memory=True,
shuffle=True
)
self.train_data=train_data
train_loader, test_loader, db_loader , train_set, test_set, db_set=cross_modal_dataset(self.args)
self.train_loader=train_loader
self.test_loader=test_loader
self.retrieval_loader=db_loader
def generate_mapping(self):
image_train=[]
@ -96,6 +164,7 @@ class Trainer(TrainBase):
for image, text, label, index in self.train_loader:
image=image.to(device, non_blocking=True)
# print(self.model.vocab_size)
text = self.clip_tokenizer.tokenize(text)
temp_image=self.model.encode_image(image)
image_train.append(temp_image.cpu().detach().numpy())
label_train.append(label.detach().numpy())
@ -112,31 +181,120 @@ class Trainer(TrainBase):
image_var_representation[str(centroid.astype(int))]= image_var[i]
return image_representation, image_var_representation
def target_adv(self, image, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var,
beta=10 ,epsilon=0.03125, alpha=3/255, num_iter=1500, temperature=0.05):
def _tokenize(self, text):
words = text.split(' ')
delta = torch.zeros_like(image,requires_grad=True)
# one=torch.zeros_like(positive)
# alienation_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7)
for i in range(num_iter):
self.model.zero_grad()
anchor=self.model.encode_image(image+delta)
loss1=F.triplet_margin_with_distance_loss(anchor, positive_code,negetive_code, distance_function=nn.CosineSimilarity())
negative_dist=(anchor-negetive_mean)**2 / negative_var
positive_dist=(anchor-positive_mean)**2 /positive_var
negatives=torch.exp(negative_dist / temperature)
positives= torch.exp(positive_dist / temperature)
loss= torch.log(positives/(positives+negatives)).mean() + beta* loss1
loss.backward(retain_graph=True)
delta.data = delta - alpha * delta.grad.detach().sign()
delta.data =clamp(delta, image).clamp(-epsilon, epsilon)
delta.grad.zero_()
adv_code=self.model.encode_image(image+delta)
return delta.detach() , adv_code
sub_words = []
keys = []
index = 0
for word in words:
sub = self.bert_tokenizer.tokenize(word)
sub_words += sub
keys.append([index, index + len(sub)])
index += len(sub)
return words, sub_words, keys
def _get_masked(self, text):
words = text.split(' ')
len_text = len(words)
masked_words = []
for i in range(len_text):
masked_words.append(words[0:i] + ['[UNK]'] + words[i + 1:])
# list of words
return masked_words
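For clarity, _get_masked produces one copy of the caption per word position with that word replaced by [UNK]; an illustrative input/output pair:
# _get_masked("a dog runs")  ->
# [['[UNK]', 'dog', 'runs'], ['a', '[UNK]', 'runs'], ['a', 'dog', '[UNK]']]
get_important_scores below joins each masked copy back into text and ranks word importance by how much the reference model's output shifts.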
def get_important_scores(self, text, origin_embeds, batch_size, max_length):
# device = origin_embeds.device
masked_words = self._get_masked(text)
masked_texts = [' '.join(words) for words in masked_words] # list of text of masked words
masked_embeds = []
for i in range(0, len(masked_texts), batch_size):
masked_text_input = self.bert_tokenizer(masked_texts[i:i+batch_size], padding='max_length', truncation=True, max_length=max_length, return_tensors='pt').to(device)
masked_embed = self.ref_net(masked_text_input.input_ids, attention_mask=masked_text_input.attention_mask)
masked_embeds.append(masked_embed)
masked_embeds = torch.cat(masked_embeds, dim=0)
criterion = torch.nn.KLDivLoss(reduction='none')
import_scores = criterion(masked_embeds.log_softmax(dim=-1), origin_embeds.softmax(dim=-1).repeat(len(masked_texts), 1))
return import_scores.sum(dim=-1)
def adv_loss(self,anchor, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var):
loss1=F.triplet_margin_with_distance_loss(anchor, positive_code,negetive_code, distance_function=nn.CosineSimilarity(),reduction='none')
negative_dist=(anchor-negetive_mean)**2 / negative_var
positive_dist=(anchor-positive_mean)**2 /positive_var
negatives=torch.exp(negative_dist / self.args.temperature)
positives= torch.exp(positive_dist / self.args.temperature)
loss= torch.log(positives/(positives+negatives)) + self.args.beta* loss1
return loss
def target_adv(self, text_tokens, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var):
texts=self.clip_tokenizer.decode(text_tokens.cpu())
text_inputs = self.bert_tokenizer(texts, padding='max_length', truncation=True, max_length=self.max_length, return_tensors='pt').to(device, non_blocking=True)
mlm_logits = self.ref_net(text_inputs.input_ids, attention_mask=text_inputs.attention_mask).logits
word_pred_scores_all, word_predictions = torch.topk(mlm_logits, self.topk, -1)
#clean state
clean_embeds=self.ref_net(text_inputs.input_ids, attention_mask=text_inputs.attention_mask)
final_adverse = []
for i, text in enumerate(texts):
important_scores = self.get_important_scores(text, clean_embeds, self.batch_size, self.max_length)
list_of_index = sorted(enumerate(important_scores), key=lambda x: x[1], reverse=True)
words, sub_words, keys = self._tokenize(text)
final_words = copy.deepcopy(words)
change = 0
for top_index in list_of_index:
if change >= self.args.num_perturbation:
break
tgt_word = words[top_index[0]]
if tgt_word in filter_words:
continue
if keys[top_index[0]][0] > self.args.max_length - 2:
continue
substitutes = word_predictions[i, keys[top_index[0]][0]:keys[top_index[0]][1]] # L, k
word_pred_scores = word_pred_scores_all[i, keys[top_index[0]][0]:keys[top_index[0]][1]]
substitutes = get_substitues(substitutes, self.bert_tokenizer, self.ref_net, 1, word_pred_scores,
self.args.threshold_pred_score)
replace_texts = [' '.join(final_words)]
available_substitutes = [tgt_word]
for substitute_ in substitutes:
substitute = substitute_
if substitute == tgt_word:
continue # filter out original word
if '##' in substitute:
continue # filter out sub-word
if substitute in filter_words:
continue
temp_replace = copy.deepcopy(final_words)
temp_replace[top_index[0]] = substitute
available_substitutes.append(substitute)
replace_texts.append(' '.join(temp_replace))
replace_text_input = self.clip_tokenizer(replace_texts).to(device)
replace_embeds = self.model.encode_text(replace_text_input)
loss = self.adv_loss(replace_embeds, negetive_code,negetive_mean,negative_var,positive_code,positive_mean,positive_var)
loss = loss.sum(dim=-1)
candidate_idx = loss.argmax()
final_words[top_index[0]] = available_substitutes[candidate_idx]
if available_substitutes[candidate_idx] != tgt_word:
change += 1
final_adverse.append(' '.join(final_words))
return final_adverse
def train_epoch(self):
self.change_state(mode="valid")
save_dir = os.path.join(self.args.save_dir, "adv_PR_cruve")
save_dir = os.path.join(self.args.save_dir, "adv_PR_t2i")
all_loss = 0
times = 0
adv_codes=[]
@ -168,33 +326,32 @@ class Trainer(TrainBase):
positive_code=self.model.encode_image(target_image)
delta, adv_code=self.target_adv(image,negetive_code,negetive_mean,negative_var,
final_adverse=self.target_adv(image,negetive_code,negetive_mean,negative_var,
positive_code,positive_mean,positive_var)
final_text=self.clip_tokenizer.tokenize(final_adverse).to(self.rank, non_blocking=True)
adv_code=self.model.encode_text(final_text)
adv_codes.append(adv_code.cpu().detach().numpy())
adv_label.append(target_label.numpy())
adv_img=np.concatenate(adv_codes)
adv_txt=np.concatenate(adv_codes)
adv_labels=np.concatenate(adv_label)
retrieval_img, retrieval_txt = self.get_code(self.retrieval_loader, self.args.retrieval_num)
retrieval_img, _ = self.get_code(self.retrieval_loader, self.args.retrieval_num)
retrieval_txt = retrieval_txt.cpu().detach().numpy()
retrieval_img = retrieval_img.cpu().detach().numpy()
retrieval_labels = self.retrieval_labels.numpy()
mAP_t=cal_map(adv_img,adv_labels,retrieval_txt,retrieval_labels)
mAP_t=cal_map(adv_txt,adv_labels,retrieval_img,retrieval_labels)
# pr=cal_pr(retrieval_txt,adv_img,query_labels,retrieval_labels)
# pr_t=cal_pr(retrieval_txt,adv_img,adv_labels,retrieval_labels)
self.logger.info(f">>>>>> MAP_t: {mAP_t}")
result_dict = {
'adv_img': adv_img,
'r_txt': retrieval_txt,
'adv_txt': adv_txt,
'r_img': retrieval_img,
'adv_l': adv_labels,
'r_l': retrieval_labels
# 'q_l':query_labels
# 'pr': pr,
# 'pr_t': pr_t
}
scio.savemat(os.path.join(save_dir, str(self.args.output_dim) + "-adv-" + self.args.dataset + ".mat"), result_dict)
self.logger.info(">>>>>> save all data!")
@ -204,28 +361,11 @@ class Trainer(TrainBase):
def train(self):
self.logger.info("Start train.")
for epoch in range(self.args.epochs):
self.train_epoch(epoch)
self.valid(epoch)
self.save_model(epoch)
self.logger.info(f">>>>>>> FINISHED >>>>>> Best epoch, I-T: {self.best_epoch_i}, mAP: {self.max_mapi2t}, T-I: {self.best_epoch_t}, mAP: {self.max_mapt2i}")
def make_hash_code(self, code: list) -> torch.Tensor:
code = torch.stack(code)
# print(code.shape)
code = code.permute(1, 0, 2)
hash_code = torch.argmax(code, dim=-1)
hash_code[torch.where(hash_code == 0)] = -1
hash_code = hash_code.float()
return hash_code
def get_code(self, data_loader, length: int):
@ -285,28 +425,6 @@ class Trainer(TrainBase):
self.logger.info(">>>>>> save all data!")
# def valid(self, epoch):
# self.logger.info("Valid.")
# self.change_state(mode="valid")
# query_img, query_txt = self.get_code(self.query_loader, self.args.query_num) if self.args.hash_layer == "select" else super().get_code(self.query_loader, self.args.query_num)
# retrieval_img, retrieval_txt = self.get_code(self.retrieval_loader, self.args.retrieval_num) if self.args.hash_layer == "select" else super().get_code(self.retrieval_loader, self.args.retrieval_num)
# # print("get all code")
# mAPi2t = calc_map_k(query_img, retrieval_txt, self.query_labels, self.retrieval_labels, None, self.rank)
# # print("map map")
# mAPt2i = calc_map_k(query_txt, retrieval_img, self.query_labels, self.retrieval_labels, None, self.rank)
# mAPi2i = calc_map_k(query_img, retrieval_img, self.query_labels, self.retrieval_labels, None, self.rank)
# mAPt2t = calc_map_k(query_txt, retrieval_txt, self.query_labels, self.retrieval_labels, None, self.rank)
# if self.max_mapi2t < mAPi2t:
# self.best_epoch_i = epoch
# self.save_mat(query_img, query_txt, retrieval_img, retrieval_txt, mode_name="i2t")
# self.max_mapi2t = max(self.max_mapi2t, mAPi2t)
# if self.max_mapt2i < mAPt2i:
# self.best_epoch_t = epoch
# self.save_mat(query_img, query_txt, retrieval_img, retrieval_txt, mode_name="t2i")
# self.max_mapt2i = max(self.max_mapt2i, mAPt2i)
# self.logger.info(f">>>>>> [{epoch}/{self.args.epochs}], MAP(i->t): {mAPi2t}, MAP(t->i): {mAPt2i}, MAP(t->t): {mAPt2t}, MAP(i->i): {mAPi2i}, \
# MAX MAP(i->t): {self.max_mapi2t}, MAX MAP(t->i): {self.max_mapt2i}")
def save_mat(self, query_img, query_txt, retrieval_img, retrieval_txt, mode_name="i2t"):
save_dir = os.path.join(self.args.save_dir, "PR_cruve")

View File

@ -9,18 +9,19 @@ def get_args():
parser.add_argument("--save-dir", type=str, default="./result/64-bit")
parser.add_argument("--clip-path", type=str, default="./ViT-B-32.pt", help="pretrained clip path.")
parser.add_argument("--pretrained", type=str, default="")
parser.add_argument("--dataset", type=str, default="flickr25k", help="choise from [coco, mirflckr25k, nuswide]")
parser.add_argument("--dataset", type=str, default="iapr", help="choise from [coco, flckr25k, iapr]")
parser.add_argument("--index-file", type=str, default="index.mat")
parser.add_argument("--caption-file", type=str, default="caption.mat")
parser.add_argument("--label-file", type=str, default="label.mat")
parser.add_argument("--similarity-function", type=str, default="euclidean", help="choise form [cosine, euclidean]")
parser.add_argument("--loss-type", type=str, default="l2", help="choise form [l1, l2]")
parser.add_argument('--victim', default='ViT-B/16', choices=['ViT-L/14', 'ViT-B/16', 'ViT-B/32', 'RN50', 'RN101'])
# parser.add_argument("--test-caption-file", type=str, default="./data/test/captions.mat")
# parser.add_argument("--test-label-file", type=str, default="./data/test/label.mat")
parser.add_argument("--text_encoder", type=str, default="bert-base-uncased")
parser.add_argument("--temperature", type=float, default=0.05)
parser.add_argument("--beta", type=float, default=10.0)
parser.add_argument("--txt-dim", type=int, default=1024)
parser.add_argument("--output-dim", type=int, default=512)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--max-words", type=int, default=77)
parser.add_argument("--resolution", type=int, default=224)
parser.add_argument("--batch-size", type=int, default=8)

View File

@ -169,64 +169,7 @@ def compute_hash_similarity(visual_embed, text_embed, use_softmax_hash=False, al
tv_similarity = calcHammingDist(hash_text, hash_visual)
return vt_similarity, tv_similarity
class CrossEn(nn.Module):
def __init__(self, mode="cosine"):
super(CrossEn, self).__init__()
# if mode == "euclidean":
# self.compute_func = F.softmax
# else:
# self.compute_func = F.log_softmax
self.mode = mode
def forward(self, sim_matrix):
# if self.mode == "cosine":
# logpt = F.log_softmax(sim_matrix, dim=-1)
# logpt = torch.diag(logpt)
# nce_loss = -logpt
# sim_loss = nce_loss.mean()
# elif self.mode == "euclidean":
# logpt = F.softmax(sim_matrix, dim=-1)
# logpt = torch.diag(sim_matrix)
# sim_loss = logpt.mean()
# else:
# raise ValueError("mode paramater is not support.[cosine, euclidean]")
if self.mode == "euclidean":
sim_matrix = -sim_matrix
logpt = F.log_softmax(sim_matrix, dim=-1)
logpt = torch.diag(logpt)
nce_loss = -logpt
sim_loss = nce_loss.mean()
return sim_loss
class CrossEn_mean(nn.Module):
def __init__(self, mode="cosine"):
super(CrossEn_mean, self).__init__()
# if mode == "euclidean":
# self.compute_func = F.softmax
# else:
# self.compute_func = F.log_softmax
self.mode = mode
def forward(self, sim_matrix):
# if self.mode == "cosine":
# logpt = F.log_softmax(sim_matrix, dim=-1)
# logpt = torch.diag(logpt)
# nce_loss = -logpt
# sim_loss = nce_loss.mean()
# elif self.mode == "euclidean":
# logpt = F.softmax(sim_matrix, dim=-1)
# logpt = torch.diag(sim_matrix)
# sim_loss = logpt.mean()
# else:
# raise ValueError("mode paramater is not support.[cosine, euclidean]")
# if self.mode == "euclidean":
# sim_matrix = -sim_matrix
# print(sim_matrix.max(), sim_matrix.min())
# logpt = F.log_softmax(sim_matrix, dim=-1)
# logpt = torch.diag(logpt)
# print(logpt.max())
sim_loss = sim_matrix.mean()
return sim_loss
def find_indices(array, b):
# Create a boolean mask where the first dimension of the array equals b