change to bertattack
parent da9bd340de
commit 1b7f952c39
@@ -0,0 +1,543 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Bert."""


import collections
import os
import unicodedata
from typing import List, Optional, Tuple

from transformers.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt",
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt",
        "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt",
        "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt",
        "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
    tokens = text.split()
    return tokens


class BertTokenizer(PreTrainedTokenizer):
    r"""
    Construct a BERT tokenizer. Based on WordPiece.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer`, which contains most of the main methods.
    Users should refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (:obj:`str`):
            File containing the vocabulary.
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (:obj:`Iterable`, `optional`):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            :obj:`do_basic_tokenize=True`.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to tokenize Chinese characters.
            This should likely be deactivated for Japanese (see this `issue
            <https://github.com/huggingface/transformers/issues/328>`__).
        strip_accents (:obj:`bool`, `optional`):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for :obj:`lowercase` (as in the original BERT).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
            )
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):

                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    # def _convert_tokens_to_ids(self, tokens):
    #     """Converts tokens (str) to ids using the vocab."""
    #     return [self._convert_token_to_id(token) for token in tokens]

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. A BERT sequence has the following format:

        - single sequence: ``[CLS] X``
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
            sequence(s).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(vocab_file)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
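# Illustrative usage sketch, not part of the committed file: it assumes a local
# WordPiece vocabulary file ("vocab.txt"); any BERT vocab file works. Note that
# build_inputs_with_special_tokens above prepends [CLS] but, unlike the stock
# HuggingFace implementation, appends no trailing [SEP] for a single sequence.
def _demo_bert_tokenizer(vocab_path="vocab.txt"):
    tok = BertTokenizer(vocab_path, do_lower_case=True)
    ids = tok.convert_tokens_to_ids(tok.tokenize("a man riding a horse"))
    single = tok.build_inputs_with_special_tokens(ids)       # [CLS] id + token ids only
    pair = tok.build_inputs_with_special_tokens(ids, ids)    # [CLS] A [SEP] B [SEP]
    return single, pair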


class BasicTokenizer(object):
    """
    Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).

    Args:
        do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (:obj:`Iterable`, `optional`):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            :obj:`do_basic_tokenize=True`.
        tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to tokenize Chinese characters.
            This should likely be deactivated for Japanese (see this `issue
            <https://github.com/huggingface/transformers/issues/328>`__).
        strip_accents (:obj:`bool`, `optional`):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for :obj:`lowercase` (as in the original BERT).
    """

    def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents

    def tokenize(self, text, never_split=None):
        """
        Basic tokenization of a piece of text. Splits on "white spaces" only; for sub-word tokenization, see
        WordPieceTokenizer.

        Args:
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes. Now implemented directly at the base class level (see
                :func:`PreTrainedTokenizer.tokenize`). List of tokens not to split.
        """
        # union() returns a new set by concatenating the two sets.
        never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)

        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if token not in never_split:
                if self.do_lower_case:
                    token = token.lower()
                    if self.strip_accents is not False:
                        token = self._run_strip_accents(token)
                elif self.strip_accents:
                    token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token, never_split))

        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text, never_split=None):
        """Splits punctuation on a piece of text."""
        if never_split is not None and text in never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)


class WordpieceTokenizer(object):
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example, :obj:`input = "unaffable"` will return as output :obj:`["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens.
        """

        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue

            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end

            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
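The WordpieceTokenizer above implements the greedy longest-match-first algorithm described in its docstring. A standalone sketch of that behavior on a toy vocabulary (the import path matches how the training script below imports this new file; the vocabulary is made up for illustration):

import_path_note = None  # noqa: the snippet below is illustrative only
from model.bert_tokenizer import WordpieceTokenizer

toy_vocab = {"[UNK]": 0, "un": 1, "##aff": 2, "##able": 3, "horse": 4, "##s": 5}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")

print(wp.tokenize("unaffable"))  # ['un', '##aff', '##able'], the docstring example
print(wp.tokenize("horses"))     # ['horse', '##s']
print(wp.tokenize("zebra"))      # ['[UNK]'] -- no in-vocab prefix, so the whole word maps to the unknown token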
@@ -1,3 +1,4 @@
import copy
from torch.nn.modules import loss
# from model.hash_model import DCMHT as DCMHT
import os
@@ -15,16 +16,102 @@ from utils import get_args, calc_neighbor, cosine_similarity, euclidean_similari
from utils.calc_utils import cal_map, cal_pr
from dataset.dataloader import dataloader
import clip
from model.simple_tokenizer import SimpleTokenizer as Tokenizer
from transformers import BertForMaskedLM
from model.bert_tokenizer import BertTokenizer
# from transformers import BertModel

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def clamp(delta, clean_imgs):
    clamp_imgs = (delta.data + clean_imgs.data).clamp(0, 1)
    clamp_delta = clamp_imgs - clean_imgs.data
    return clamp_delta


filter_words = ['a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against', 'ain', 'all', 'almost',
                'alone', 'along', 'already', 'also', 'although', 'am', 'among', 'amongst', 'an', 'and', 'another',
                'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'aren', "aren't", 'around', 'as',
                'at', 'back', 'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside', 'besides',
                'between', 'beyond', 'both', 'but', 'by', 'can', 'cannot', 'could', 'couldn', "couldn't", 'd', 'didn',
                "didn't", 'doesn', "doesn't", 'don', "don't", 'down', 'due', 'during', 'either', 'else', 'elsewhere',
                'empty', 'enough', 'even', 'ever', 'everyone', 'everything', 'everywhere', 'except', 'first', 'for',
                'former', 'formerly', 'from', 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'he', 'hence',
                'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
                'how', 'however', 'hundred', 'i', 'if', 'in', 'indeed', 'into', 'is', 'isn', "isn't", 'it', "it's",
                'its', 'itself', 'just', 'latter', 'latterly', 'least', 'll', 'may', 'me', 'meanwhile', 'mightn',
                "mightn't", 'mine', 'more', 'moreover', 'most', 'mostly', 'must', 'mustn', "mustn't", 'my', 'myself',
                'namely', 'needn', "needn't", 'neither', 'never', 'nevertheless', 'next', 'no', 'nobody', 'none',
                'noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'o', 'of', 'off', 'on', 'once', 'one', 'only',
                'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours', 'ourselves', 'out', 'over', 'per',
                'please', 's', 'same', 'shan', "shan't", 'she', "she's", "should've", 'shouldn', "shouldn't", 'somehow',
                'something', 'sometime', 'somewhere', 'such', 't', 'than', 'that', "that'll", 'the', 'their', 'theirs',
                'them', 'themselves', 'then', 'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
                'thereupon', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'thus', 'to', 'too',
                'toward', 'towards', 'under', 'unless', 'until', 'up', 'upon', 'used', 've', 'was', 'wasn', "wasn't",
                'we', 'were', 'weren', "weren't", 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
                'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever', 'whether', 'which', 'while',
                'whither', 'who', 'whoever', 'whole', 'whom', 'whose', 'why', 'with', 'within', 'without', 'won',
                "won't", 'would', 'wouldn', "wouldn't", 'y', 'yet', 'you', "you'd", "you'll", "you're", "you've",
                'your', 'yours', 'yourself', 'yourselves', '.', '-', 'a the', '/', '?', 'some', '"', ',', 'b', '&', '!',
                '@', '%', '^', '*', '(', ')', "-", '-', '+', '=', '<', '>', '|', ':', ";", '~', '·']
filter_words = set(filter_words)


def get_bpe_substitues(substitutes, tokenizer, mlm_model):
    # substitutes L, k
    # device = mlm_model.device
    substitutes = substitutes[0:12, 0:4]  # maximum BPE candidates

    # find all possible candidates
    all_substitutes = []
    for i in range(substitutes.size(0)):
        if len(all_substitutes) == 0:
            lev_i = substitutes[i]
            all_substitutes = [[int(c)] for c in lev_i]
        else:
            lev_i = []
            for all_sub in all_substitutes:
                for j in substitutes[i]:
                    lev_i.append(all_sub + [int(j)])
            all_substitutes = lev_i

    # all_substitutes: list of lists of token ids (all candidates)
    c_loss = nn.CrossEntropyLoss(reduction='none')
    word_list = []
    # all_substitutes = all_substitutes[:24]
    all_substitutes = torch.tensor(all_substitutes)  # [ N, L ]
    all_substitutes = all_substitutes[:24].to(device)
    # print(substitutes.size(), all_substitutes.size())
    N, L = all_substitutes.size()
    word_predictions = mlm_model(all_substitutes)[0]  # N L vocab-size
    ppl = c_loss(word_predictions.view(N * L, -1), all_substitutes.view(-1))  # [ N*L ]
    ppl = torch.exp(torch.mean(ppl.view(N, L), dim=-1))  # N
    _, word_list = torch.sort(ppl)
    word_list = [all_substitutes[i] for i in word_list]
    final_words = []
    for word in word_list:
        tokens = [tokenizer._convert_id_to_token(int(i)) for i in word]
        text = tokenizer.convert_tokens_to_string(tokens)
        final_words.append(text)
    return final_words


def get_substitues(substitutes, tokenizer, mlm_model, use_bpe, substitutes_score=None, threshold=3.0):
    # substitutes L, k
    # from this matrix to recover a word
    words = []
    sub_len, k = substitutes.size()  # sub-len, k

    if sub_len == 0:
        return words

    elif sub_len == 1:
        for (i, j) in zip(substitutes[0], substitutes_score[0]):
            if threshold != 0 and j < threshold:
                break
            words.append(tokenizer._convert_id_to_token(int(i)))
    else:
        if use_bpe == 1:
            words = get_bpe_substitues(substitutes, tokenizer, mlm_model)
        else:
            return words
    #
    # print(words)
    return words


class Trainer(TrainBase):
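get_substitues and get_bpe_substitues above recover candidate replacement words from the top-k masked-LM prediction matrix of a sub-word span; multi-token candidates are ranked by their masked-LM pseudo-perplexity. A hedged sketch of how they are typically driven, mirroring the call made inside target_adv further down. The checkpoint name stands in for args.text_encoder, and get_substitues is assumed to be in scope from this training script:

import torch
from transformers import BertForMaskedLM
from model.bert_tokenizer import BertTokenizer

name = "bert-base-uncased"  # assumed checkpoint, stands in for args.text_encoder
tokenizer = BertTokenizer.from_pretrained(name, do_lower_case=True)
mlm_model = BertForMaskedLM.from_pretrained(name).eval()

enc = tokenizer("a man riding a horse", return_tensors="pt")
logits = mlm_model(enc.input_ids, attention_mask=enc.attention_mask).logits
scores, ids = torch.topk(logits, k=8, dim=-1)         # [1, L, k] prediction scores and token ids

span = slice(2, 3)                                     # sub-word positions covering one word
candidates = get_substitues(ids[0, span], tokenizer, mlm_model,
                            use_bpe=1, substitutes_score=scores[0, span], threshold=3.0)
print(candidates)                                      # a handful of single-token replacements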
@@ -37,6 +124,9 @@ class Trainer(TrainBase):
        self.image_mean=image_mean
        self.image_var=image_var
        self.device=rank
        self.clip_tokenizer=Tokenizer()
        self.bert_tokenizer=BertTokenizer.from_pretrained(self.args.text_encoder,do_lower_case=True)
        self.ref_net = BertForMaskedLM.from_pretrained(self.args.text_encoder)
        # self.run()

    def _init_model(self):
@@ -89,11 +179,54 @@ class Trainer(TrainBase):
        )
        self.train_data=train_data

    def _tokenize(self, text):
        words = text.split(' ')

        sub_words = []
        keys = []
        index = 0
        for word in words:
            sub = self.bert_tokenizer.tokenize(word)
            sub_words += sub
            keys.append([index, index + len(sub)])
            index += len(sub)

        return words, sub_words, keys

    def get_important_scores(self, text, origin_embeds, batch_size, max_length):
        # device = origin_embeds.device

        masked_words = self._get_masked(text)
        masked_texts = [' '.join(words) for words in masked_words]  # list of texts, each with one word masked

        masked_embeds = []
        for i in range(0, len(masked_texts), batch_size):
            masked_text_input = self.bert_tokenizer(masked_texts[i:i + batch_size], padding='max_length', truncation=True, max_length=max_length, return_tensors='pt').to(device)
            masked_embed = self.ref_net(masked_text_input.input_ids, attention_mask=masked_text_input.attention_mask).logits
            masked_embeds.append(masked_embed)
        masked_embeds = torch.cat(masked_embeds, dim=0)

        criterion = torch.nn.KLDivLoss(reduction='none')

        import_scores = criterion(masked_embeds.log_softmax(dim=-1), origin_embeds.softmax(dim=-1).repeat(len(masked_texts), 1))

        return import_scores.sum(dim=-1)

    def _get_masked(self, text):
        words = text.split(' ')
        len_text = len(words)
        masked_words = []
        for i in range(len_text):
            masked_words.append(words[0:i] + ['[UNK]'] + words[i + 1:])
        # list of word lists
        return masked_words


    def generate_mapping(self):
        image_train=[]
        label_train=[]
        for image, text, label, index in self.train_loader:
            # raw_text=[self.clip_tokenizer.decode(token) for token in text]
            image=image.to(device, non_blocking=True)
            # print(self.model.vocab_size)
            temp_image=self.model.encode_image(image)
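get_important_scores above ranks words by how much masking each one (with [UNK], via _get_masked) shifts the reference masked-LM's output distribution, measured as a summed KL divergence against the clean text. A small self-contained sketch of the masking step and the ranking idea (variable names here are illustrative only):

text = "a man riding a horse"
words = text.split(' ')
masked_variants = [' '.join(words[:i] + ['[UNK]'] + words[i + 1:]) for i in range(len(words))]
# ['[UNK] man riding a horse', 'a [UNK] riding a horse', 'a man [UNK] a horse', ...]
# Each variant is fed to the reference MLM; the KL divergence between its output
# distribution and the clean text's distribution, summed over the vocabulary, is that
# word's importance score. Words are then attacked in decreasing order of this score.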
@@ -112,30 +245,66 @@ class Trainer(TrainBase):
            image_var_representation[str(centroid.astype(int))]= image_var[i]
        return image_representation, image_var_representation

    def target_adv(self, image, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var,
    def target_adv(self, texts, raw_text, negetive_code,negetive_mean,negative_var, positive_code,positive_mean,positive_var,
                   beta=10 ,epsilon=0.03125, alpha=3/255, num_iter=1500, temperature=0.05):

        delta = torch.zeros_like(image,requires_grad=True)
        # one=torch.zeros_like(positive)
        bert_inputs=self.bert_tokenizer(raw_text, padding='max_length', truncation=True, max_length=self.max_length, return_tensors='pt').to(device, non_blocking=True)
        mlm_logits = self.ref_net(bert_inputs.input_ids, attention_mask=bert_inputs.attention_mask).logits
        word_pred_scores_all, word_predictions = torch.topk(mlm_logits, self.topk, -1)

        # clean state
        clean_embeds=self.ref_net(bert_inputs.input_ids, attention_mask=bert_inputs.attention_mask).logits
        final_adverse = []

        # alienation_loss = nn.TripletMarginLoss(margin=1.0, p=2, eps=1e-7)
        for i in range(num_iter):
            self.model.zero_grad()
            anchor=self.model.encode_image(image+delta)
            loss1=F.triplet_margin_with_distance_loss(anchor, positive_code,negetive_code, distance_function=nn.CosineSimilarity())
            negative_dist=(anchor-negetive_mean)**2 / negative_var
            positive_dist=(anchor-positive_mean)**2 /positive_var
            negatives=torch.exp(negative_dist / temperature)
            positives= torch.exp(positive_dist / temperature)
            loss= torch.log(positives/(positives+negatives)).mean() + beta* loss1
            loss.backward(retain_graph=True)
            delta.data = delta - alpha * delta.grad.detach().sign()
            delta.data =clamp(delta, image).clamp(-epsilon, epsilon)
            delta.grad.zero_()
        adv_code=self.model.encode_image(image+delta)
        return delta.detach() , adv_code
        for i, text in enumerate(texts):
            important_scores = self.get_important_scores(text, clean_embeds, self.batch_size, self.max_length)
            list_of_index = sorted(enumerate(important_scores), key=lambda x: x[1], reverse=True)
            words, sub_words, keys = self._tokenize(text)
            final_words = copy.deepcopy(words)
            change = 0
            for top_index in list_of_index:
                if change >= self.args.num_perturbation:
                    break
                tgt_word = words[top_index[0]]
                if tgt_word in filter_words:
                    continue
                if keys[top_index[0]][0] > self.args.max_length - 2:
                    continue
                substitutes = word_predictions[i, keys[top_index[0]][0]:keys[top_index[0]][1]]  # L, k
                word_pred_scores = word_pred_scores_all[i, keys[top_index[0]][0]:keys[top_index[0]][1]]
                substitutes = get_substitues(substitutes, self.tokenizer, self.ref_net, 1, word_pred_scores,
                                             self.args.threshold_pred_score)
                replace_texts = [' '.join(final_words)]
                available_substitutes = [tgt_word]
                for substitute_ in substitutes:
                    substitute = substitute_
                    if substitute == tgt_word:
                        continue  # filter out original word
                    if '##' in substitute:
                        continue  # filter out sub-word

                    if substitute in filter_words:
                        continue
                    temp_replace = copy.deepcopy(final_words)
                    temp_replace[top_index[0]] = substitute
                    available_substitutes.append(substitute)
                    replace_texts.append(' '.join(temp_replace))
                replace_text_input = self.clip_tokenizer(replace_texts).to(device)
                replace_embeds = self.model.encode_text(replace_text_input)

                loss = self.adv_loss(replace_embeds, negetive_code,negetive_mean,negative_var,positive_code,positive_mean,positive_var)
                loss = loss.sum(dim=-1)
                candidate_idx = loss.argmax()
                final_words[top_index[0]] = available_substitutes[candidate_idx]
                if available_substitutes[candidate_idx] != tgt_word:
                    change += 1
            final_adverse.append(' '.join(final_words))
        return final_adverse

    def train_epoch(self):
        self.change_state(mode="valid")
        # self.change_state(mode="valid")
        save_dir = os.path.join(self.args.save_dir, "adv_PR_cruve")
        all_loss = 0
        times = 0
@@ -146,6 +315,7 @@ class Trainer(TrainBase):
            times += 1
            print(times)
            image.float()
            raw_text=[self.clip_tokenizer.decode(token) for token in text]
            image = image.to(self.rank, non_blocking=True)
            text = text.to(self.rank, non_blocking=True)
            negetive_mean=np.stack([self.image_mean[str(i.astype(int))] for i in label.detach().cpu().numpy()])
@@ -168,14 +338,16 @@ class Trainer(TrainBase):
            positive_code=self.model.encode_image(target_image)


            delta, adv_code=self.target_adv(image,negetive_code,negetive_mean,negative_var,
            final_adverse=self.target_adv(text, raw_text,negetive_code,negetive_mean,negative_var,
                                          positive_code,positive_mean,positive_var)
            final_text=self.clip_tokenizer.tokenize(final_adverse).to(self.rank, non_blocking=True)
            adv_code=self.model.encode_text(final_text)
            adv_codes.append(adv_code.cpu().detach().numpy())
            adv_label.append(target_label.numpy())
        adv_img=np.concatenate(adv_codes)
        adv_labels=np.concatenate(adv_label)

        retrieval_img, retrieval_txt = self.get_code(self.retrieval_loader, self.args.retrieval_num)
        _, retrieval_txt = self.get_code(self.retrieval_loader, self.args.retrieval_num)
@@ -184,8 +356,6 @@ class Trainer(TrainBase):


        mAP_t=cal_map(adv_img,adv_labels,retrieval_txt,retrieval_labels)
        # pr=cal_pr(retrieval_txt,adv_img,query_labels,retrieval_labels)
        # pr_t=cal_pr(retrieval_txt,adv_img,adv_labels,retrieval_labels)
        self.logger.info(f">>>>>> MAP_t: {mAP_t}")
        result_dict = {
            'adv_img': adv_img,