parent d828c6de9a
commit 8498d724d1
@@ -150,6 +150,7 @@ dmypy.json

 # Pyre type checker
 .pyre/
+.vscode/

 # pytype static type analyzer
 .pytype/
@@ -0,0 +1,47 @@
+[
+    {
+        "response": "I can’t help with that request because it could cause harm. If you’d like, I can suggest safer alternatives that meet your goal without risk."
+    },
+    {
+        "response": "I can’t provide instructions for that action since it’s unsafe. However, I can explain the underlying science in a safe and legal way."
+    },
+    {
+        "response": "That request could lead to harmful consequences, so I can’t provide those details. It’s important to approach the topic in a way that keeps everyone safe."
+    },
+    {
+        "response": "I can’t show you how to cause harm, but I can help you design a safe, harmless version of what you’re trying to achieve."
+    },
+    {
+        "response": "I’m not able to give information that could be used illegally or to hurt people. I can help you find lawful, ethical ways to accomplish your goal instead."
+    },
+    {
+        "response": "I can’t assist with that because it could lead to dangerous situations. Let’s focus on finding a safer, more responsible approach."
+    },
+    {
+        "response": "I’m unable to provide that information as it could be misused. I can help you with safer alternatives or educational content instead."
+    },
+    {
+        "response": "I can’t comply with that request because it poses risks. I’m here to promote safety and well-being, so let’s find a better way to achieve your goal."
+    },
+    {
+        "response": "I can’t assist with that request as it could lead to harm. I’m here to help in ways that are safe and constructive."
+    },
+    {
+        "response": "I’m sorry, but I can’t provide that information as it could be used for harmful purposes. Let’s focus on something positive instead."
+    },
+    {
+        "response": "I’m sorry, but I can’t help with that."
+    },
+    {
+        "response": "I can’t provide that information."
+    },
+    {
+        "response": "That request isn’t something I can assist with."
+    },
+    {
+        "response": "I’m not able to help you with that."
+    },
+    {
+        "response": "I can’t comply with that request."
+    }
+]
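The new file above (its path is not shown in this view; the extraction loop added below reads benign.json, so it is presumably that file) is a pool of canned refusal responses that stand in for benign model answers. A minimal sketch of consuming it, mirroring the loop added later in this commit:

import json

# Load the refusal-response pool added by this commit; the filename
# 'benign.json' is taken from the extraction script changed below.
with open('benign.json') as f:
    benign_answers = json.load(f)

# Each entry is an object with a single "response" field.
for anw in benign_answers:
    print(anw['response'])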

@@ -4,7 +4,7 @@ import torch.nn.functional as F
 import evaluate
 from datasets import load_metric
 from datasets import load_dataset
-import datasets
+# import datasets
 from tqdm import tqdm
 import numpy as np
 import pickle
@@ -23,6 +23,7 @@ from torch.autograd import Variable
 from scipy.spatial import distance
 from sklearn.linear_model import Perceptron
 from sklearn.ensemble import GradientBoostingRegressor
+import json


@@ -54,16 +55,10 @@ def main():


     parser = argparse.ArgumentParser()
-    parser.add_argument('--model', type=str, default='llama2_chat_7B')
-    parser.add_argument('--model_name', type=str, default='step-1-8k')
-    parser.add_argument('--dataset_name', type=str, default='tqa')
-    parser.add_argument('--num_gene', type=int, default=1)
-    parser.add_argument('--use_rouge', type=bool, default=False)
-    parser.add_argument('--weighted_svd', type=int, default=0)
-    parser.add_argument('--feat_loc_svd', type=int, default=1)
-    parser.add_argument('--wild_ratio', type=float, default=0.75)
-    parser.add_argument('--thres_gt', type=float, default=0.5)
-    parser.add_argument('--most_likely', type=bool, default=True)
+    parser.add_argument('--attack_model', type=str, default='llama2_chat_7B')
+    parser.add_argument('--judge_model', type=str, default='gpt-4')
+    parser.add_argument('--dataset_name', type=str, default='AdvBench')

     parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data')
     args = parser.parse_args()
@@ -73,94 +68,16 @@ def main():


-    if args.dataset_name == "tqa":
-        dataset = load_dataset("truthful_qa", 'generation')['validation']
-    elif args.dataset_name == 'triviaqa':
-        dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation")
-        id_mem = set()
-
-        def remove_dups(batch):
-            if batch['question_id'][0] in id_mem:
-                return {_: [] for _ in batch.keys()}
-            id_mem.add(batch['question_id'][0])
-            return batch
-
-        dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False)
-    elif args.dataset_name == 'tydiqa':
-        dataset = datasets.load_dataset("tydiqa", "secondary_task", split="train")
-        used_indices = []
-        for i in range(len(dataset)):
-            if 'english' in dataset[i]['id']:
-                used_indices.append(i)
-    elif args.dataset_name == 'coqa':
-        import json
-        import pandas as pd
-        from datasets import Dataset
-
-        def _save_dataset():
-            # https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py
-            save_path = f'./coqa_dataset'
-            if not os.path.exists(save_path):
-                # https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json
-                with open(f'./coqa-dev-v1.0.json', 'r') as infile:
-                    data = json.load(infile)['data']
-
-                dataset = {}
-
-                dataset['story'] = []
-                dataset['question'] = []
-                dataset['answer'] = []
-                dataset['additional_answers'] = []
-                dataset['id'] = []
-
-                for sample_id, sample in enumerate(data):
-                    story = sample['story']
-                    questions = sample['questions']
-                    answers = sample['answers']
-                    additional_answers = sample['additional_answers']
-                    for question_index, question in enumerate(questions):
-                        dataset['story'].append(story)
-                        dataset['question'].append(question['input_text'])
-                        dataset['answer'].append({
-                            'text': answers[question_index]['input_text'],
-                            'answer_start': answers[question_index]['span_start']
-                        })
-                        dataset['id'].append(sample['id'] + '_' + str(question_index))
-                        additional_answers_list = []
-
-                        for i in range(3):
-                            additional_answers_list.append(additional_answers[str(i)][question_index]['input_text'])
-
-                        dataset['additional_answers'].append(additional_answers_list)
-                        story = story + ' Q: ' + question['input_text'] + ' A: ' + answers[question_index]['input_text']
-                        if not story[-1] == '.':
-                            story = story + '.'
-
-                dataset_df = pd.DataFrame.from_dict(dataset)
-
-                dataset = Dataset.from_pandas(dataset_df)
-
-                dataset.save_to_disk(save_path)
-            return save_path
-
-        # dataset = datasets.load_from_disk(_save_dataset())
-        def get_dataset(tokenizer, split='validation'):
-            # from https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py
-            dataset = datasets.load_from_disk(_save_dataset())
-            id_to_question_mapping = dict(zip(dataset['id'], dataset['question']))
-
-            def encode_coqa(example):
-                example['answer'] = [example['answer']['text']] + example['additional_answers']
-                example['prompt'] = prompt = example['story'] + ' Q: ' + example['question'] + ' A:'
-                return tokenizer(prompt, truncation=False, padding=False)
-
-            dataset = dataset.map(encode_coqa, batched=False, load_from_cache_file=False)
-            dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'], output_all_columns=True)
-            return dataset
-
-        dataset = get_dataset(llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True))
+    if args.dataset_name == "AdvBench":
+        dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "advbench")['test']
+    elif args.dataset_name == 'HarmBench':
+        dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "harmbench")['standard']
+    elif args.dataset_name == 'JBB':
+        dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "JBB")['test']
     else:
-        raise ValueError("Invalid dataset name")
+        raise ValueError("Not supported dataset name!")



     tokenizer = llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)
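The three supported benchmarks all come from the same hub repository, and each record is read through its 'query' and 'target' fields in the extraction loop of the next hunk. A minimal inspection sketch under that assumption (the config and split names are copied from the diff; the field names are taken from the loop below, not from the dataset card):

from datasets import load_dataset

# A minimal sketch, assuming the field layout implied by the loop
# added in the next hunk ('query' and 'target' per record).
dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "advbench")['test']
sample = dataset[0]
print(sample['query'])   # the harmful request
print(sample['target'])  # the adversarial target completion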
@@ -170,46 +87,97 @@ def main():
     HEADS = [f"model.layers.{i}.self_attn.head_out" for i in range(model.config.num_hidden_layers)]
     MLPS = [f"model.layers.{i}.mlp" for i in range(model.config.num_hidden_layers)]
     # firstly get the embeddings of the generated question and answers.
-    embed_generated = []
-    embed_generated_h = []
-    embed_generated_t = []
+    # embed_generated = []
+    benign_embed_generated = []
+    benign_embed_generated_loc1 = []
+    benign_embed_generated_loc2 = []
+    adverse_embed_generated = []
+    adverse_embed_generated_loc1 = []
+    adverse_embed_generated_loc2 = []

-    if args.dataset_name == 'tydiqa':
-        length = len(used_indices)
-    else:
-        length = len(dataset)
+    with open('benign.json') as f:
+        benign_answers = json.load(f)
+    length = len(dataset)
     for i in tqdm(range(length)):
-        if args.dataset_name == 'tydiqa':
-            question = dataset[int(used_indices[i])]['question']
-        else:
-            question = dataset[i]['question']
-        if args.most_likely:
-            info = 'most_likely_'
-        else:
-            info = 'batch_generations_'
-        answers = np.load(
-            f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
-        for anw in answers:
-            if args.dataset_name == 'tydiqa':
-                prompt = tokenizer(
-                    "Concisely answer the following question based on the information in the given passage: \n" + \
-                    " Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + anw,
-                    return_tensors='pt').input_ids.cuda()
-            elif args.dataset_name == 'coqa':
-                prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda()
-            else:
-                prompt = tokenizer(
-                    f"Answer the question concisely. Q: {question}" + " A:" + anw,
-                    return_tensors='pt').input_ids.cuda()
+        question = dataset[i]['query']
+        adversary = dataset[i]['target']
+        # benign = dataset[i]['benign']
+        # if args.dataset_name == 'JBB':
+        #     question = dataset[i]['query']
+        #     adversary = dataset[i]['target']
+        # elif args.dataset_name == 'AdvBench':
+        #     question = dataset[i]['prompt']
+        #     adversary = dataset[i]['target']
+        # elif args.dataset_name == 'HarmBench':
+        #     question = dataset[i]['prompt']
+        #     adversary = dataset[i]['adversary']
+        # else:
+        #     question = dataset[i]['question']
+        # if args.most_likely:
+        #     info = 'most_likely_'
+        # else:
+        #     info = 'batch_generations_'
+        # answers = np.load(
+        #     f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
+        for anw in benign_answers:
+            benign_prompt = tokenizer(
+                f" Q: {question}" + " A:" + anw['response'],
+                return_tensors='pt').input_ids.cuda()
+            adverse_prompt = tokenizer(
+                f" Q: {question}" + " A:" + adversary,
+                return_tensors='pt').input_ids.cuda()

             with torch.no_grad():
-                hidden_states = model(prompt, output_hidden_states=True).hidden_states
-                hidden_states = torch.stack(hidden_states, dim=0).squeeze()
-                hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :]
-                embed_generated.append(hidden_states)
-    embed_generated = np.asarray(np.stack(embed_generated), dtype=np.float32)
-    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_layer_wise.npy', embed_generated)
+                benign_hidden_states = model(benign_prompt, output_hidden_states=True).hidden_states
+                benign_hidden_states = torch.stack(benign_hidden_states, dim=0).squeeze()
+                benign_hidden_states = benign_hidden_states.detach().cpu().numpy()[:, -1, :]
+                benign_embed_generated.append(benign_hidden_states)
+                adverse_hidden_states = model(adverse_prompt, output_hidden_states=True).hidden_states
+                adverse_hidden_states = torch.stack(adverse_hidden_states, dim=0).squeeze()
+                adverse_hidden_states = adverse_hidden_states.detach().cpu().numpy()[:, -1, :]
+                adverse_embed_generated.append(adverse_hidden_states)
+            with torch.no_grad():
+                with TraceDict(model, HEADS + MLPS) as ret:
+                    output = model(benign_prompt, output_hidden_states=True)
+                head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
+                head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
+                mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
+                mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
+                benign_embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
+                benign_embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
+            with torch.no_grad():
+                with TraceDict(model, HEADS + MLPS) as ret:
+                    output = model(adverse_prompt, output_hidden_states=True)
+                head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
+                head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
+                mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
+                mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
+                adverse_embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
+                adverse_embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])

+    benign_embed_generated = np.asarray(np.stack(benign_embed_generated), dtype=np.float32)
+    adverse_embed_generated = np.asarray(np.stack(adverse_embed_generated), dtype=np.float32)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_benign_embeddings_layer_wise.npy', benign_embed_generated)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_adverse_embeddings_layer_wise.npy', adverse_embed_generated)
+
+    embed_generated_loc2 = np.asarray(np.stack(embed_generated_loc2), dtype=np.float32)
+    embed_generated_loc1 = np.asarray(np.stack(embed_generated_loc1), dtype=np.float32)
+    embed_generated_h_loc2 = np.asarray(np.stack(embed_generated_h_loc2), dtype=np.float32)
+    embed_generated_h_loc1 = np.asarray(np.stack(embed_generated_h_loc1), dtype=np.float32)
+    embed_generated_t_loc2 = np.asarray(np.stack(embed_generated_t_loc2), dtype=np.float32)
+    embed_generated_t_loc1 = np.asarray(np.stack(embed_generated_t_loc1), dtype=np.float32)
+
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy', embed_generated_loc1)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_embeddings_mlp_wise.npy', embed_generated_loc2)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_head_wise.npy', embed_generated_h_loc2)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_mlp_wise.npy', embed_generated_h_loc1)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_head_wise.npy', embed_generated_t_loc2)
+    np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_mlp_wise.npy', embed_generated_t_loc1)

     embed_generated_t_loc2 = []
     embed_generated_t_loc1 = []
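Not part of this commit: the Perceptron import kept at the top of the file suggests the saved arrays feed a downstream probe. A hedged sketch of fitting such a probe on the two layer-wise files written above, assuming the arrays are shaped (num_samples, num_layers, hidden_dim); the concrete path substitutes example values for the f-string fields in the np.save calls:

import numpy as np
from sklearn.linear_model import Perceptron

# Hypothetical follow-up: test whether benign and adversarial
# activations are linearly separable at a single layer.
benign = np.load('save_for_eval/AdvBench/llama2_chat_7B_jailbreak/llama2_chat_7B_benign_embeddings_layer_wise.npy')
adverse = np.load('save_for_eval/AdvBench/llama2_chat_7B_jailbreak/llama2_chat_7B_adverse_embeddings_layer_wise.npy')

layer = -1  # probe the final layer
X = np.concatenate([benign[:, layer, :], adverse[:, layer, :]])
y = np.concatenate([np.zeros(len(benign)), np.ones(len(adverse))])

probe = Perceptron().fit(X, y)
print('train accuracy:', probe.score(X, y))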
@@ -252,7 +220,7 @@ def main():
             mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()

             embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
             embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
     for hal in hallucinations:
         if args.dataset_name == 'tydiqa':
             prompt = tokenizer(