This commit is contained in:
li 2025-08-14 19:56:20 +08:00
parent d828c6de9a
commit 8498d724d1
3 changed files with 151 additions and 135 deletions

1
.gitignore vendored
View File

@ -150,6 +150,7 @@ dmypy.json
# Pyre type checker
.pyre/
.vscode/
# pytype static type analyzer
.pytype/

47
benign.json Normal file
View File

@ -0,0 +1,47 @@
[
{
"response": "I cant help with that request because it could cause harm. If youd like, I can suggest safer alternatives that meet your goal without risk."
},
{
"response": "I cant provide instructions for that action since its unsafe. However, I can explain the underlying science in a safe and legal way."
},
{
"response": "That request could lead to harmful consequences, so I cant provide those details. Its important to approach the topic in a way that keeps everyone safe."
},
{
"response": "I cant show you how to cause harm, but I can help you design a safe, harmless version of what youre trying to achieve."
},
{
"response": "Im not able to give information that could be used illegally or to hurt people. I can help you find lawful, ethical ways to accomplish your goal instead."
},
{
"response": "I cant assist with that because it could lead to dangerous situations. Lets focus on finding a safer, more responsible approach."
},
{
"response": "Im unable to provide that information as it could be misused. I can help you with safer alternatives or educational content instead."
},
{
"response": "I cant comply with that request because it poses risks. Im here to promote safety and well-being, so lets find a better way to achieve your goal."
},
{
"response": "I cant assist with that request as it could lead to harm. Im here to help in ways that are safe and constructive."
},
{
"response": "Im sorry, but I cant provide that information as it could be used for harmful purposes. Lets focus on something positive instead."
},
{
"response": "Im sorry, but I cant help with that."
},
{
"response": "I cant provide that information."
},
{
"response": "That request isnt something I can assist with."
},
{
"response": "Im not able to help you with that."
},
{
"response": "I cant comply with that request."
}
]

View File

@ -4,7 +4,7 @@ import torch.nn.functional as F
import evaluate
from datasets import load_metric
from datasets import load_dataset
import datasets
# import datasets
from tqdm import tqdm
import numpy as np
import pickle
@ -23,6 +23,7 @@ from torch.autograd import Variable
from scipy.spatial import distance
from sklearn.linear_model import Perceptron
from sklearn.ensemble import GradientBoostingRegressor
import json
@ -54,16 +55,10 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='llama2_chat_7B')
parser.add_argument('--model_name', type=str, default='step-1-8k')
parser.add_argument('--dataset_name', type=str, default='tqa')
parser.add_argument('--num_gene', type=int, default=1)
parser.add_argument('--use_rouge', type=bool, default= False)
parser.add_argument('--weighted_svd', type=int, default=0)
parser.add_argument('--feat_loc_svd', type=int, default=1)
parser.add_argument('--wild_ratio', type=float, default=0.75)
parser.add_argument('--thres_gt', type=float, default=0.5)
parser.add_argument('--most_likely', type=bool, default=True)
parser.add_argument('--attack_model', type=str, default='llama2_chat_7B')
parser.add_argument('--judge_model', type=str, default='gpt-4')
parser.add_argument('--dataset_name', type=str, default='AdvBench')
parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data')
args = parser.parse_args()
@ -73,94 +68,16 @@ def main():
if args.dataset_name == "tqa":
dataset = load_dataset("truthful_qa", 'generation')['validation']
elif args.dataset_name == 'triviaqa':
dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation")
id_mem = set()
def remove_dups(batch):
    # Keep only the first occurrence of each TriviaQA question_id.
    # `map` is called with batch_size=1, so each batch holds one example;
    # returning all-empty columns drops that example from the dataset.
    # NOTE(review): relies on the enclosing scope's `id_mem` set.
    qid = batch['question_id'][0]
    if qid in id_mem:
        return {column: [] for column in batch.keys()}
    id_mem.add(qid)
    return batch
dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False)
elif args.dataset_name == 'tydiqa':
dataset = datasets.load_dataset("tydiqa", "secondary_task", split="train")
used_indices = []
for i in range(len(dataset)):
if 'english' in dataset[i]['id']:
used_indices.append(i)
elif args.dataset_name == 'coqa':
import json
import pandas as pd
from datasets import Dataset
def _save_dataset():
    # Parse the raw CoQA dev JSON into a flat (story, question) table,
    # cache it on disk as a Hugging Face Dataset, and return the cache path.
    # Adapted from:
    # https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py
    save_path = f'./coqa_dataset'
    if not os.path.exists(save_path):
        # Raw file must already be present locally; source:
        # https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json
        with open(f'./coqa-dev-v1.0.json', 'r') as infile:
            data = json.load(infile)['data']
        # Columnar accumulator — one row per (story, question) pair.
        dataset = {}
        dataset['story'] = []
        dataset['question'] = []
        dataset['answer'] = []
        dataset['additional_answers'] = []
        dataset['id'] = []
        for sample_id, sample in enumerate(data):
            story = sample['story']
            questions = sample['questions']
            answers = sample['answers']
            additional_answers = sample['additional_answers']
            for question_index, question in enumerate(questions):
                dataset['story'].append(story)
                dataset['question'].append(question['input_text'])
                dataset['answer'].append({
                    'text': answers[question_index]['input_text'],
                    'answer_start': answers[question_index]['span_start']
                })
                # Row id = conversation id + question position within it.
                dataset['id'].append(sample['id'] + '_' + str(question_index))
                additional_answers_list = []
                # Three extra reference answer sets are read, keyed '0'..'2'.
                for i in range(3):
                    additional_answers_list.append(additional_answers[str(i)][question_index]['input_text'])
                dataset['additional_answers'].append(additional_answers_list)
                # Grow the story with each Q/A so later questions in the same
                # conversation see the dialogue history accumulated so far.
                story = story + ' Q: ' + question['input_text'] + ' A: ' + answers[question_index]['input_text']
                if not story[-1] == '.':
                    story = story + '.'
        dataset_df = pd.DataFrame.from_dict(dataset)
        dataset = Dataset.from_pandas(dataset_df)
        dataset.save_to_disk(save_path)
    return save_path
# dataset = datasets.load_from_disk(_save_dataset())
def get_dataset(tokenizer, split='validation'):
    # Load the cached CoQA dataset, build "story Q: ... A:" prompts, tokenize
    # them, and return the dataset formatted as torch tensors.
    # NOTE(review): `split` is accepted but never used in this body.
    # from https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py
    dataset = datasets.load_from_disk(_save_dataset())
    # NOTE(review): this mapping is built but never used below — dead code?
    id_to_question_mapping = dict(zip(dataset['id'], dataset['question']))
    def encode_coqa(example):
        # Merge the primary answer text with the additional reference answers
        # into a single list of acceptable answers.
        example['answer'] = [example['answer']['text']] + example['additional_answers']
        # Prompt ends with ' A:' so the model generates the answer continuation.
        example['prompt'] = prompt = example['story'] + ' Q: ' + example['question'] + ' A:'
        return tokenizer(prompt, truncation=False, padding=False)
    dataset = dataset.map(encode_coqa, batched=False, load_from_cache_file=False)
    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'], output_all_columns=True)
    return dataset
dataset = get_dataset(llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True))
if args.dataset_name == "AdvBench":
dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "advbench")['test']
elif args.dataset_name == 'HarmBench':
dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "harmbench")['standard']
elif args.dataset_name == 'JBB':
dataset = load_dataset("thu-coai/AISafetyLab_Datasets", "JBB")['test']
else:
raise ValueError("Invalid dataset name")
assert "Not supported dataset name!"
tokenizer = llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)
@ -170,46 +87,97 @@ def main():
HEADS = [f"model.layers.{i}.self_attn.head_out" for i in range(model.config.num_hidden_layers)]
MLPS = [f"model.layers.{i}.mlp" for i in range(model.config.num_hidden_layers)]
# firstly get the embeddings of the generated question and answers.
embed_generated = []
embed_generated_h =[]
embed_generated_t=[]
# embed_generated = []
benign_embed_generated =[]
benign_embed_generated_loc1 =[]
benign_embed_generated_loc2 =[]
adverse_embed_generated=[]
adverse_embed_generated_loc1=[]
adverse_embed_generated_loc2=[]
if args.dataset_name == 'tydiqa':
length = len(used_indices)
else:
length = len(dataset)
with open('benign.json') as f:
benign_answers = json.load(f)
length = len(dataset)
for i in tqdm(range(length)):
if args.dataset_name == 'tydiqa':
question = dataset[int(used_indices[i])]['question']
else:
question = dataset[i]['question']
if args.most_likely:
info = 'most_likely_'
else:
info = 'batch_generations_'
answers = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
for anw in answers:
question = dataset[i]['query']
adversary = dataset[i]['target']
# benign = dataset[i]['benign']
# if args.dataset_name == 'JBB':
# question = dataset[i]['query']
# adversary = dataset[i]['target']
# elif args.dataset_name == 'AdvBench':
# question = dataset[i]['prompt']
# adversary = dataset[i]['target']
# elif args.dataset_name == 'HarmBench':
# question = dataset[i]['prompt']
# adversary = dataset[i]['adversary']
# else:
# question = dataset[i]['question']
# if args.most_likely:
# info = 'most_likely_'
# else:
# info = 'batch_generations_'
# answers = np.load(
# f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
for anw in benign_answers:
benign_prompt = tokenizer(
f" Q: {question}" + " A:" + anw['response'],
return_tensors='pt').input_ids.cuda()
adverse_prompt = tokenizer(
f" Q: {question}" + " A:" + adversary,
return_tensors='pt').input_ids.cuda()
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + anw,
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + anw,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
hidden_states = model(prompt, output_hidden_states=True).hidden_states
hidden_states = torch.stack(hidden_states, dim=0).squeeze()
hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :]
embed_generated.append(hidden_states)
embed_generated = np.asarray(np.stack(embed_generated), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_layer_wise.npy', embed_generated)
benign_hidden_states = model(benign_prompt, output_hidden_states=True).hidden_states
benign_hidden_states = torch.stack(benign_hidden_states, dim=0).squeeze()
benign_hidden_states = benign_hidden_states.detach().cpu().numpy()[:, -1, :]
benign_embed_generated.append(benign_hidden_states)
adverse_hidden_states = model(adverse_prompt, output_hidden_states=True).hidden_states
adverse_hidden_states = torch.stack(adverse_hidden_states, dim=0).squeeze()
adverse_hidden_states = adverse_hidden_states.detach().cpu().numpy()[:, -1, :]
adverse_embed_generated.append(adverse_hidden_states)
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(benign_prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
benign_embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
benign_embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(adverse_prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
adverse_embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
adverse_embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
benign_embed_generated = np.asarray(np.stack(benign_embed_generated), dtype=np.float32)
adverse_embed_generated = np.asarray(np.stack(adverse_embed_generated), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_benign_embeddings_layer_wise.npy', benign_embed_generated)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_adverse_embeddings_layer_wise.npy', adverse_embed_generated)
embed_generated_loc2 = np.asarray(np.stack(embed_generated_loc2), dtype=np.float32)
embed_generated_loc1 = np.asarray(np.stack(embed_generated_loc1), dtype=np.float32)
embed_generated_h_loc2 = np.asarray(np.stack(embed_generated_h_loc2), dtype=np.float32)
embed_generated_h_loc1 = np.asarray(np.stack(embed_generated_h_loc1), dtype=np.float32)
embed_generated_t_loc2 = np.asarray(np.stack(embed_generated_t_loc2), dtype=np.float32)
embed_generated_t_loc1 = np.asarray(np.stack(embed_generated_t_loc1), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy', embed_generated_loc1)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_embeddings_mlp_wise.npy', embed_generated_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_head_wise.npy', embed_generated_h_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_mlp_wise.npy', embed_generated_h_loc1)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_head_wise.npy', embed_generated_t_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_mlp_wise.npy', embed_generated_t_loc1)
embed_generated_t_loc2 = []
embed_generated_t_loc1 = []