diff --git a/.gitignore b/.gitignore index 78fbfd1..09c8938 100644 --- a/.gitignore +++ b/.gitignore @@ -174,6 +174,7 @@ poetry.toml data/ save_for_eval/ models/ +visualizations/ # LSP config files pyrightconfig.json diff --git a/b.sh b/b.sh index 33d0790..50e55eb 100644 --- a/b.sh +++ b/b.sh @@ -1,2 +1,2 @@ export HF_ENDPOINT=https://hf-mirror.com -CUDA_VISIBLE_DEVICES=1 python jailbreak_llama.py \ No newline at end of file +CUDA_VISIBLE_DEVICES=8 python jailbreak_llama.py \ No newline at end of file diff --git a/generate_data.py b/generate_data.py new file mode 100644 index 0000000..e69de29 diff --git a/hal_det_opt.py b/hal_det_opt.py deleted file mode 100644 index c4c5d6f..0000000 --- a/hal_det_opt.py +++ /dev/null @@ -1,681 +0,0 @@ -import os -import torch -import torch.nn.functional as F -import evaluate -from datasets import load_metric -from datasets import load_dataset -from tqdm import tqdm -import numpy as np -import pickle -from utils import get_llama_activations_bau, tokenized_tqa, tokenized_tqa_gen, tokenized_tqa_gen_end_q -import llama_iti -import pickle -import argparse -import matplotlib.pyplot as plt -from pprint import pprint -from baukit import Trace, TraceDict -from metric_utils import get_measures, print_measures -import re -from torch.autograd import Variable -from transformers import AutoModelForCausalLM, AutoTokenizer - - -def seed_everything(seed: int): - import random, os - import numpy as np - import torch - - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = True - -HF_NAMES = { - 'llama_7B': 'baffo32/decapoda-research-llama-7B-hf', - 'honest_llama_7B': 'validation/results_dump/llama_7B_seed_42_top_48_heads_alpha_15', - 'alpaca_7B': 'circulus/alpaca-7b', - 'vicuna_7B': 'AlekseyKorshuk/vicuna-7b', - 'llama2_chat_7B': 'models/Llama-2-7b-chat-hf', - 'llama2_chat_13B': 'models/Llama-2-13b-chat-hf', - "opt-6.7b": "models/opt-6.7b", - "opt-13b": "models/opt-13b", -} - - -def main(): - - - parser = argparse.ArgumentParser() - parser.add_argument('--model_name', type=str, default='opt-6.7b') - parser.add_argument('--dataset_name', type=str, default='tqa') - parser.add_argument('--num_gene', type=int, default=1) - parser.add_argument('--gene', type=int, default=0) - parser.add_argument('--generate_gt', type=int, default=0) - parser.add_argument('--use_rouge', type=int, default=0) - parser.add_argument('--weighted_svd', type=int, default=0) - parser.add_argument('--feat_loc_svd', type=int, default=0) - parser.add_argument('--wild_ratio', type=float, default=0.75) - parser.add_argument('--thres_gt', type=float, default=0.5) - parser.add_argument('--most_likely', type=int, default=0) - - parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data') - args = parser.parse_args() - - MODEL = HF_NAMES[args.model_name] if not args.model_dir else args.model_dir - - - - - if args.dataset_name == "tqa": - dataset = load_dataset("truthful_qa", 'generation')['validation'] - elif args.dataset_name == 'triviaqa': - dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation") - id_mem = set() - - def remove_dups(batch): - if batch['question_id'][0] in id_mem: - return {_: [] for _ in batch.keys()} - id_mem.add(batch['question_id'][0]) - return batch - - dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False) - elif args.dataset_name == 
'tydiqa': - dataset = datasets.load_dataset("tydiqa", "secondary_task", split="train") - used_indices = [] - for i in range(len(dataset)): - if 'english' in dataset[i]['id']: - used_indices.append(i) - elif args.dataset_name == 'coqa': - import json - import pandas as pd - from datasets import Dataset - - def _save_dataset(): - # https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - save_path = f'./coqa_dataset' - if not os.path.exists(save_path): - # https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json - with open(f'./coqa-dev-v1.0.json', 'r') as infile: - data = json.load(infile)['data'] - - dataset = {} - - dataset['story'] = [] - dataset['question'] = [] - dataset['answer'] = [] - dataset['additional_answers'] = [] - dataset['id'] = [] - - for sample_id, sample in enumerate(data): - story = sample['story'] - questions = sample['questions'] - answers = sample['answers'] - additional_answers = sample['additional_answers'] - for question_index, question in enumerate(questions): - dataset['story'].append(story) - dataset['question'].append(question['input_text']) - dataset['answer'].append({ - 'text': answers[question_index]['input_text'], - 'answer_start': answers[question_index]['span_start'] - }) - dataset['id'].append(sample['id'] + '_' + str(question_index)) - additional_answers_list = [] - - for i in range(3): - additional_answers_list.append(additional_answers[str(i)][question_index]['input_text']) - - dataset['additional_answers'].append(additional_answers_list) - story = story + ' Q: ' + question['input_text'] + ' A: ' + answers[question_index]['input_text'] - if not story[-1] == '.': - story = story + '.' - - dataset_df = pd.DataFrame.from_dict(dataset) - - dataset = Dataset.from_pandas(dataset_df) - - dataset.save_to_disk(save_path) - return save_path - - # dataset = datasets.load_from_disk(_save_dataset()) - def get_dataset(tokenizer, split='validation'): - # from https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - dataset = datasets.load_from_disk(_save_dataset()) - id_to_question_mapping = dict(zip(dataset['id'], dataset['question'])) - - def encode_coqa(example): - example['answer'] = [example['answer']['text']] + example['additional_answers'] - example['prompt'] = prompt = example['story'] + ' Q: ' + example['question'] + ' A:' - return tokenizer(prompt, truncation=False, padding=False) - - dataset = dataset.map(encode_coqa, batched=False, load_from_cache_file=False) - dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'], output_all_columns=True) - return dataset - - dataset = get_dataset(llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)) - else: - raise ValueError("Invalid dataset name") - - if args.gene: - - model = AutoModelForCausalLM.from_pretrained(MODEL, - torch_dtype=torch.float16).cuda() - tokenizer = AutoTokenizer.from_pretrained(MODEL, use_fast=False) - - begin_index = 0 - if args.dataset_name == 'tydiqa': - end_index = len(used_indices) - else: - end_index = len(dataset) - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}_hal_det_opt/'): - os.mkdir(f'./save_for_eval/{args.dataset_name}_hal_det_opt/') - - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}_hal_det_opt/answers'): - os.mkdir(f'./save_for_eval/{args.dataset_name}_hal_det_opt/answers') - - - - period_token_id = tokenizer('. 
')['input_ids'][1] - eos_tokens = ['Question:', ' Question:', '\n', 'Answer:', ' Answer:', 'Q:'] - question_framing_ids = [[tokenizer(eos_token)['input_ids'][1]] for eos_token in eos_tokens] - - - - for i in range(begin_index, end_index): - answers = [None] * args.num_gene - if args.dataset_name == 'tydiqa': - question = dataset[int(used_indices[i])]['question'] - prompt = tokenizer( - "Concisely answer the following question based on the information in the given passage: \n" + \ - " Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:", - return_tensors='pt').input_ids.cuda() - elif args.dataset_name == 'coqa': - prompt = tokenizer( - dataset[i]['prompt'], return_tensors='pt').input_ids.cuda() - else: - question = dataset[i]['question'] - prompt = tokenizer(f"Answer the question concisely. Q: {question}" + " A:", return_tensors='pt').input_ids.cuda() - for gen_iter in range(args.num_gene): - if args.most_likely: - generated = model.generate(prompt, - num_beams=5, - num_return_sequences=1, - do_sample=False, - max_new_tokens=64, - eos_token_id=period_token_id, - bad_words_ids=question_framing_ids - ) - else: - generated = model.generate(prompt, - do_sample=True, - num_return_sequences=1, - num_beams=1, - max_new_tokens=64, - temperature=0.5, - top_p=1.0, - eos_token_id=period_token_id, - bad_words_ids=question_framing_ids - ) - - - decoded = tokenizer.decode(generated[0, prompt.shape[-1]:], - skip_special_tokens=True) - if args.dataset_name == 'tqa' or args.dataset_name == 'triviaqa': - # corner case. - if 'Answer the question concisely' in decoded: - print('#####error') - print(decoded.split('Answer the question concisely')[1]) - print('#####error') - decoded = decoded.split('Answer the question concisely')[0] - if args.dataset_name == 'coqa': - if 'Q:' in decoded: - print('#####error') - print(decoded.split('Q:')[1]) - print('#####error') - decoded = decoded.split('Q:')[0] - print(decoded) - answers[gen_iter] = decoded - - - print('sample: ', i) - if args.most_likely: - info = 'most_likely_' - else: - info = 'batch_generations_' - print("Saving answers") - np.save(f'./save_for_eval/{args.dataset_name}_hal_det_opt/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy', - answers) - elif args.generate_gt: - from bleurt_pytorch import BleurtConfig, BleurtForSequenceClassification, BleurtTokenizer - - model = BleurtForSequenceClassification.from_pretrained('./models/BLEURT-20').cuda() - tokenizer = BleurtTokenizer.from_pretrained('./models/BLEURT-20') - model.eval() - - rouge = evaluate.load('rouge') - gts = np.zeros(0) - if args.dataset_name == 'tydiqa': - length = len(used_indices) - else: - length = len(dataset) - for i in range(length): - if args.dataset_name == 'tqa': - best_answer = dataset[i]['best_answer'] - correct_answer = dataset[i]['correct_answers'] - all_answers = [best_answer] + correct_answer - elif args.dataset_name == 'triviaqa': - all_answers = dataset[i]['answer']['aliases'] - elif args.dataset_name == 'coqa': - all_answers = dataset[i]['answer'] - elif args.dataset_name == 'tydiqa': - all_answers = dataset[int(used_indices[i])]['answers']['text'] - - if args.most_likely: - answers = np.load( - f'./save_for_eval/{args.dataset_name}_hal_det_opt/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - else: - answers = np.load( - 
f'./save_for_eval/{args.dataset_name}_hal_det_opt/answers/batch_generations_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - # get the gt. - if args.use_rouge: - - predictions = answers - all_results = np.zeros((len(all_answers), len(predictions))) - all_results1 = np.zeros((len(all_answers), len(predictions))) - all_results2 = np.zeros((len(all_answers), len(predictions))) - for anw in range(len(all_answers)): - results = rouge.compute(predictions=predictions, - references=[all_answers[anw]] * len(predictions), - use_aggregator=False) - all_results[anw] = results['rougeL'] - all_results1[anw] = results['rouge1'] - all_results2[anw] = results['rouge2'] - - # breakpoint() - gts = np.concatenate([gts, np.max(all_results, axis=0)], 0) - - if i % 50 == 0: - print("samples passed: ", i) - else: - - predictions = answers - all_results = np.zeros((len(all_answers), len(predictions))) - with torch.no_grad(): - for anw in range(len(all_answers)): - inputs = tokenizer(predictions.tolist(), [all_answers[anw]] * len(predictions), - padding='longest', return_tensors='pt') - for key in list(inputs.keys()): - inputs[key] = inputs[key].cuda() - res = np.asarray(model(**inputs).logits.flatten().tolist()) - all_results[anw] = res - gts = np.concatenate([gts, np.max(all_results, axis=0)], 0) - if i % 10 == 0: - print("samples passed: ", i) - # breakpoint() - if args.most_likely: - if args.use_rouge: - np.save(f'./ml_{args.dataset_name}_rouge_score_opt.npy', gts) - else: - np.save(f'./ml_{args.dataset_name}_bleurt_score_opt.npy', gts) - else: - if args.use_rouge: - np.save(f'./bg_{args.dataset_name}_rouge_score_opt.npy', gts) - else: - np.save(f'./bg_{args.dataset_name}_bleurt_score_opt.npy', gts) - - else: - model = AutoModelForCausalLM.from_pretrained(MODEL, - torch_dtype=torch.float16).cuda() - tokenizer = AutoTokenizer.from_pretrained(MODEL, use_fast=False) - # firstly get the embeddings of the generated question and answers. - embed_generated = [] - - if args.dataset_name == 'tydiqa': - length = len(used_indices) - else: - length = len(dataset) - for i in tqdm(range(length)): - if args.dataset_name == 'tydiqa': - question = dataset[int(used_indices[i])]['question'] - else: - question = dataset[i]['question'] - answers = np.load( - f'save_for_eval/{args.dataset_name}_hal_det_opt/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - - for anw in answers: - - if args.dataset_name == 'tydiqa': - prompt = tokenizer( - "Concisely answer the following question based on the information in the given passage: \n" + \ - " Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:", - return_tensors='pt').input_ids.cuda() - elif args.dataset_name == 'coqa': - prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda() - else: - prompt = tokenizer( - f"Answer the question concisely. 
Q: {question}" + " A:" + anw, - return_tensors='pt').input_ids.cuda() - with torch.no_grad(): - hidden_states = model(prompt, output_hidden_states=True).hidden_states - hidden_states = torch.stack(hidden_states, dim=0).squeeze() - hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :] - embed_generated.append(hidden_states) - embed_generated = np.asarray(np.stack(embed_generated), dtype=np.float32) - np.save(f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_gene_embeddings_layer_wise.npy', embed_generated) - - HEADS = [f"model.decoder.layers.{i}.self_attn.out_proj" for i in range(model.config.num_hidden_layers)] - MLPS = [f"model.decoder.layers.{i}.fc2" for i in range(model.config.num_hidden_layers)] - embed_generated_loc2 = [] - embed_generated_loc1 = [] - for i in tqdm(range(length)): - if args.dataset_name == 'tydiqa': - question = dataset[int(used_indices[i])]['question'] - else: - question = dataset[i]['question'] - - - answers = np.load( - f'save_for_eval/{args.dataset_name}_hal_det_opt/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - for anw in answers: - if args.dataset_name == 'tydiqa': - prompt = tokenizer( - "Concisely answer the following question based on the information in the given passage: \n" + \ - " Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:", - return_tensors='pt').input_ids.cuda() - elif args.dataset_name == 'coqa': - prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda() - else: - prompt = tokenizer( - f"Answer the question concisely. Q: {question}" + " A:" + anw, - return_tensors='pt').input_ids.cuda() - - with torch.no_grad(): - with TraceDict(model, HEADS + MLPS) as ret: - output = model(prompt, output_hidden_states=True) - head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS] - head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy() - mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS] - mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy() - - embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :]) - embed_generated_loc1.append(head_wise_hidden_states[:, -1, :]) - embed_generated_loc2 = np.asarray(np.stack(embed_generated_loc2), dtype=np.float32) - embed_generated_loc1 = np.asarray(np.stack(embed_generated_loc1), dtype=np.float32) - - np.save(f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_gene_embeddings_head_wise.npy', embed_generated_loc1) - np.save(f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_embeddings_mlp_wise.npy', embed_generated_loc2) - - - - # get the split and label (true or false) of the unlabeled data and the test data. - if args.use_rouge: - gts = np.load(f'./ml_{args.dataset_name}_rouge_score_opt.npy') - gts_bg = np.load(f'./bg_{args.dataset_name}_rouge_score_opt.npy') - else: - gts = np.load(f'./ml_{args.dataset_name}_bleurt_score_opt.npy') - gts_bg = np.load(f'./bg_{args.dataset_name}_bleurt_score_opt.npy') - thres = args.thres_gt - gt_label = np.asarray(gts> thres, dtype=np.int32) - gt_label_bg = np.asarray(gts_bg > thres, dtype=np.int32) - - - if args.dataset_name == 'tydiqa': - length = len(used_indices) - else: - length = len(dataset) - - - permuted_index = np.random.permutation(length) - wild_q_indices = permuted_index[:int(args.wild_ratio * length)] - # exclude validation samples. 
- wild_q_indices1 = wild_q_indices[:len(wild_q_indices) - 100] - wild_q_indices2 = wild_q_indices[len(wild_q_indices) - 100:] - gt_label_test = [] - gt_label_wild = [] - gt_label_val = [] - for i in range(length): - if i not in wild_q_indices: - gt_label_test.extend(gt_label[i: i+1]) - elif i in wild_q_indices1: - gt_label_wild.extend(gt_label[i: i+1]) - else: - gt_label_val.extend(gt_label[i: i+1]) - gt_label_test = np.asarray(gt_label_test) - gt_label_wild = np.asarray(gt_label_wild) - gt_label_val = np.asarray(gt_label_val) - - - - - def svd_embed_score(embed_generated_wild, gt_label, begin_k, k_span, mean=1, svd=1, weight=0): - embed_generated = embed_generated_wild - best_auroc_over_k = 0 - best_layer_over_k = 0 - best_scores_over_k = None - best_projection_over_k = None - for k in tqdm(range(begin_k, k_span)): - best_auroc = 0 - best_layer = 0 - best_scores = None - mean_recorded = None - best_projection = None - for layer in range(len(embed_generated_wild[0])): - if mean: - mean_recorded = embed_generated[:, layer, :].mean(0) - centered = embed_generated[:, layer, :] - mean_recorded - else: - centered = embed_generated[:, layer, :] - - if not svd: - pca_model = PCA(n_components=k, whiten=False).fit(centered) - projection = pca_model.components_.T - mean_recorded = pca_model.mean_ - if weight: - projection = pca_model.singular_values_ * projection - else: - _, sin_value, V_p = torch.linalg.svd(torch.from_numpy(centered).cuda()) - projection = V_p[:k, :].T.cpu().data.numpy() - if weight: - projection = sin_value[:k] * projection - - - scores = np.mean(np.matmul(centered, projection), -1, keepdims=True) - assert scores.shape[1] == 1 - scores = np.sqrt(np.sum(np.square(scores), axis=1)) - - # not sure about whether true and false data the direction will point to, - # so we test both. 
similar practices are in the representation engineering paper - # https://arxiv.org/abs/2310.01405 - measures1 = get_measures(scores[gt_label == 1], - scores[gt_label == 0], plot=False) - measures2 = get_measures(-scores[gt_label == 1], - -scores[gt_label == 0], plot=False) - - if measures1[0] > measures2[0]: - measures = measures1 - sign_layer = 1 - else: - measures = measures2 - sign_layer = -1 - - if measures[0] > best_auroc: - best_auroc = measures[0] - best_result = [100 * measures[2], 100 * measures[0]] - best_layer = layer - best_scores = sign_layer * scores - best_projection = projection - best_mean = mean_recorded - best_sign = sign_layer - print('k: ', k, 'best result: ', best_result, 'layer: ', best_layer, - 'mean: ', mean, 'svd: ', svd) - - if best_auroc > best_auroc_over_k: - best_auroc_over_k = best_auroc - best_result_over_k = best_result - best_layer_over_k = best_layer - best_k = k - best_sign_over_k = best_sign - best_scores_over_k = best_scores - best_projection_over_k = best_projection - best_mean_over_k = best_mean - - - return {'k': best_k, - 'best_layer':best_layer_over_k, - 'best_auroc':best_auroc_over_k, - 'best_result':best_result_over_k, - 'best_scores':best_scores_over_k, - 'best_mean': best_mean_over_k, - 'best_sign':best_sign_over_k, - 'best_projection':best_projection_over_k} - - - from sklearn.decomposition import PCA - feat_loc = args.feat_loc_svd - - - - if args.most_likely: - if feat_loc == 3: - embed_generated = np.load(f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_gene_embeddings_layer_wise.npy', - allow_pickle=True) - elif feat_loc == 2: - embed_generated = np.load( - f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_gene_embeddings_mlp_wise.npy', - allow_pickle=True) - else: - embed_generated = np.load( - f'save_for_eval/{args.dataset_name}_hal_det_opt/most_likely_{args.model_name}_gene_embeddings_head_wise.npy', - allow_pickle=True) - feat_indices_wild = [] - feat_indices_eval = [] - - if args.dataset_name == 'tydiqa': - length = len(used_indices) - else: - length = len(dataset) - - - for i in range(length): - if i in wild_q_indices1: - feat_indices_wild.extend(np.arange(i, i+1).tolist()) - elif i in wild_q_indices2: - feat_indices_eval.extend(np.arange(i, i + 1).tolist()) - if feat_loc == 3: - embed_generated_wild = embed_generated[feat_indices_wild][:,1:,:] - embed_generated_eval = embed_generated[feat_indices_eval][:, 1:, :] - else: - embed_generated_wild = embed_generated[feat_indices_wild] - embed_generated_eval = embed_generated[feat_indices_eval] - - - - - - # returned_results = svd_embed_score(embed_generated_wild, gt_label_wild, - # 1, 11, mean=0, svd=0, weight=args.weighted_svd) - # get the best hyper-parameters on validation set - returned_results = svd_embed_score(embed_generated_eval, gt_label_val, - 1, 11, mean=0, svd=0, weight=args.weighted_svd) - - pca_model = PCA(n_components=returned_results['k'], whiten=False).fit(embed_generated_wild[:,returned_results['best_layer'],:]) - projection = pca_model.components_.T - if args.weighted_svd: - projection = pca_model.singular_values_ * projection - scores = np.mean(np.matmul(embed_generated_wild[:,returned_results['best_layer'],:], projection), -1, keepdims=True) - assert scores.shape[1] == 1 - best_scores = np.sqrt(np.sum(np.square(scores), axis=1)) * returned_results['best_sign'] - - - - # direct projection - feat_indices_test = [] - - for i in range(length): - if i not in wild_q_indices: - feat_indices_test.extend(np.arange(1 * 
i, 1 * i + 1).tolist()) - if feat_loc == 3: - embed_generated_test = embed_generated[feat_indices_test][:, 1:, :] - else: - embed_generated_test = embed_generated[feat_indices_test] - - test_scores = np.mean(np.matmul(embed_generated_test[:,returned_results['best_layer'],:], - projection), -1, keepdims=True) - - assert test_scores.shape[1] == 1 - test_scores = np.sqrt(np.sum(np.square(test_scores), axis=1)) - - measures = get_measures(returned_results['best_sign'] * test_scores[gt_label_test == 1], - returned_results['best_sign'] *test_scores[gt_label_test == 0], plot=False) - print_measures(measures[0], measures[1], measures[2], 'direct-projection') - - - thresholds = np.linspace(0,1, num=40)[1:-1] - normalizer = lambda x: x / (np.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-10) - auroc_over_thres = [] - for thres_wild in thresholds: - best_auroc = 0 - for layer in range(len(embed_generated_wild[0])): - thres_wild_score = np.sort(best_scores)[int(len(best_scores) * thres_wild)] - true_wild = embed_generated_wild[:,layer,:][best_scores > thres_wild_score] - false_wild = embed_generated_wild[:,layer,:][best_scores <= thres_wild_score] - - embed_train = np.concatenate([true_wild,false_wild],0) - label_train = np.concatenate([np.ones(len(true_wild)), - np.zeros(len(false_wild))], 0) - - - ## gt training, saplma - # embed_train = embed_generated_wild[:,layer,:] - # label_train = gt_label_wild - ## gt training, saplma - from linear_probe import get_linear_acc - - - - best_acc, final_acc, ( - clf, best_state, best_preds, preds, labels_val), losses_train = get_linear_acc( - embed_train, - label_train, - embed_train, - label_train, - 2, epochs = 50, - print_ret = True, - batch_size=512, - cosine=True, - nonlinear = True, - learning_rate = 0.05, - weight_decay = 0.0003) - - - - clf.eval() - output = clf(torch.from_numpy( - embed_generated_test[:, layer, :]).cuda()) - pca_wild_score_binary_cls = torch.sigmoid(output) - - - pca_wild_score_binary_cls = pca_wild_score_binary_cls.cpu().data.numpy() - - if np.isnan(pca_wild_score_binary_cls).sum() > 0: - breakpoint() - measures = get_measures(pca_wild_score_binary_cls[gt_label_test == 1], - pca_wild_score_binary_cls[gt_label_test == 0], plot=False) - - if measures[0] > best_auroc: - best_auroc = measures[0] - best_result = [100 * measures[0]] - best_layer = layer - - auroc_over_thres.append(best_auroc) - print('thres: ', thres_wild, 'best result: ', best_result, 'best_layer: ', best_layer) - - - -if __name__ == '__main__': - seed_everything(42) - main() \ No newline at end of file diff --git a/hal_generate.py b/hal_generate.py deleted file mode 100644 index 41406f5..0000000 --- a/hal_generate.py +++ /dev/null @@ -1,463 +0,0 @@ -import os -import time -import torch -import torch.nn.functional as F -import evaluate -from datasets import load_metric -from datasets import load_dataset -import datasets -from tqdm import tqdm -import numpy as np -import pickle -# from utils import get_llama_activations_bau, tokenized_tqa, tokenized_tqa_gen, tokenized_tqa_gen_end_q -from utils import get_hal_prompt, get_qa_prompt, get_truth_prompt -import llama_iti -import pickle -import argparse -import matplotlib.pyplot as plt -from pprint import pprint -from baukit import Trace, TraceDict -from metric_utils import get_measures, print_measures -import re -from torch.autograd import Variable -from openai import OpenAI -import openai - -API={ - 'gpt-3.5-turbo':{'base_url':"https://api.agicto.cn/v1",'key':''}, - 
'deepseek-chat':{'base_url':"https://api.deepseek.com/v1",'key':'sk-5f06261529bb44df86d9b2fdbae1a6b5'}, - 'qwen-plus':{'base_url':"https://dashscope.aliyuncs.com/compatible-mode/v1",'key':'sk-5be20597fa574155a9e56d7df1acfc7f'}, - 'step-1-8k':{'base_url':"https://api.stepfun.com/v1",'key':'2hqEtnMCWe5cugi1mAVWRZat5hydLFG8tEJWPRW5XnxglpWxRBp5W0M0dvPAFXhC3'}, - 'moonshot-v1-8k':{'base_url':"https://api.moonshot.cn/v1",'key':'sk-8zjQm3CMAI7qQUWYLgFxSCCQxCOkVfuSkRcs6kNxUZY2L4aV'}, - 'ERNIE-3.5-8K':{'base_url':"https://api.agicto.cn/v1",'key':'sk-BmLsx7BClpqtmIwxLNB5pH5lJ36WJ7GxiV3nV5PiwF7Iwauf'}, - -} - -def seed_everything(seed: int): - import random, os - import numpy as np - import torch - - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = True - -HF_NAMES = { - 'llama_7B': 'baffo32/decapoda-research-llama-7B-hf', - 'honest_llama_7B': 'validation/results_dump/llama_7B_seed_42_top_48_heads_alpha_15', - 'alpaca_7B': 'circulus/alpaca-7b', - 'vicuna_7B': 'AlekseyKorshuk/vicuna-7b', - 'llama2_chat_7B': 'models/Llama-2-7b-chat-hf', - 'llama2_chat_13B': 'models/Llama-2-13b-chat-hf', - 'llama2_chat_70B': 'meta-llama/Llama-2-70b-chat-hf', -} - - -def main(): - - - parser = argparse.ArgumentParser() - parser.add_argument('--model_name', type=str, default='step-1-8k') - parser.add_argument('--dataset_name', type=str, default='triviaqa') - parser.add_argument('--num_gene', type=int, default=1) - parser.add_argument('--use_api', type=bool, default=True) - parser.add_argument('--most_likely', type=bool, default=True) - parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data') - parser.add_argument("--instruction", type=str, default='/home/liwenyun/code/haloscope/generation/qa/qa_one-turn_instruction.txt', help='local directory of instruction file.') - args = parser.parse_args() - - - if args.use_api: - # openai.api_base=API[args.model_name]['base_url'] - # openai.api_key=API[args.model_name]['key'] - client = OpenAI( - api_key = API[args.model_name]['key'], - base_url = API[args.model_name]['base_url'], - ) - - else: - MODEL = HF_NAMES[args.model_name] if not args.model_dir else args.model_dir - - - - - if args.dataset_name == "tqa": - dataset = load_dataset("truthful_qa", 'generation')['validation'] - elif args.dataset_name == 'triviaqa': - dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation") - id_mem = set() - - def remove_dups(batch): - if batch['question_id'][0] in id_mem: - return {_: [] for _ in batch.keys()} - id_mem.add(batch['question_id'][0]) - return batch - - dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False) - elif args.dataset_name == 'tydiqa': - dataset = datasets.load_dataset("tydiqa", "secondary_task", split="train") - used_indices = [] - for i in range(len(dataset)): - if 'english' in dataset[i]['id']: - used_indices.append(i) - elif args.dataset_name == 'coqa': - import json - import pandas as pd - from datasets import Dataset - - def _save_dataset(): - # https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - save_path = f'./coqa_dataset' - if not os.path.exists(save_path): - # https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json - with open(f'./coqa-dev-v1.0.json', 'r') as infile: - data = json.load(infile)['data'] - - dataset = {} - - dataset['story'] = [] - dataset['question'] 
= [] - dataset['answer'] = [] - dataset['additional_answers'] = [] - dataset['id'] = [] - - for sample_id, sample in enumerate(data): - story = sample['story'] - questions = sample['questions'] - answers = sample['answers'] - additional_answers = sample['additional_answers'] - for question_index, question in enumerate(questions): - dataset['story'].append(story) - dataset['question'].append(question['input_text']) - dataset['answer'].append({ - 'text': answers[question_index]['input_text'], - 'answer_start': answers[question_index]['span_start'] - }) - dataset['id'].append(sample['id'] + '_' + str(question_index)) - additional_answers_list = [] - - for i in range(3): - additional_answers_list.append(additional_answers[str(i)][question_index]['input_text']) - - dataset['additional_answers'].append(additional_answers_list) - story = story + ' Q: ' + question['input_text'] + ' A: ' + answers[question_index]['input_text'] - if not story[-1] == '.': - story = story + '.' - - dataset_df = pd.DataFrame.from_dict(dataset) - - dataset = Dataset.from_pandas(dataset_df) - - dataset.save_to_disk(save_path) - return save_path - - # dataset = datasets.load_from_disk(_save_dataset()) - def get_dataset(tokenizer, split='validation'): - # from https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - dataset = datasets.load_from_disk(_save_dataset()) - id_to_question_mapping = dict(zip(dataset['id'], dataset['question'])) - - def encode_coqa(example): - example['answer'] = [example['answer']['text']] + example['additional_answers'] - example['prompt'] = prompt = example['story'] + ' Q: ' + example['question'] + ' A:' - return tokenizer(prompt, truncation=False, padding=False) - - dataset = dataset.map(encode_coqa, batched=False, load_from_cache_file=False) - dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'], output_all_columns=True) - return dataset - - dataset = get_dataset(llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)) - else: - raise ValueError("Invalid dataset name") - f = open(args.instruction, 'r', encoding="utf-8") - instruction = f.read() - error_output='No output' - - if args.use_api: - begin_index = 0 - if args.dataset_name == 'tydiqa': - end_index = len(used_indices) - else: - end_index = len(dataset) - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/') - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/') - - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers') - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations') - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/truths'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/truths') - - - for i in range(begin_index, end_index): - answers = [None] * args.num_gene - hallucinations= [None] * args.num_gene - truths = [None] * args.num_gene - if args.dataset_name == 'tydiqa': - question = dataset[int(used_indices[i])]['question'] - prompt = get_qa_prompt(dataset[int(used_indices[i])]['context'],question) - 
hallucination_prompt=get_hal_prompt(dataset[int(used_indices[i])]['context'],question,instruction) - truth_prompt=get_truth_prompt(dataset[int(used_indices[i])]['context'],question) - elif args.dataset_name == 'triviaqa': - prompt = get_qa_prompt("None",dataset[i]['question']) - question= dataset[i]['question'] - hallucination_prompt=get_hal_prompt("None",dataset[i]['question'],instruction) - truth_prompt=get_truth_prompt("None",question) - elif args.dataset_name == 'coqa': - prompt = get_qa_prompt("None",dataset[i]['prompt']) - hallucination_prompt=get_hal_prompt("None",dataset[i]['prompt'],instruction) - else: - question = dataset[i]['question'] - prompt = get_qa_prompt("None",question) - hallucination_prompt=get_hal_prompt("None",question,instruction) - - for gen_iter in range(args.num_gene): - if args.most_likely: - try: - response = client.chat.completions.create( - model = args.model_name, - messages = prompt, - max_tokens=256, - top_p=1, - temperature = 1, - ) - decoded=response.choices[0].message.content - except openai.APIStatusError as e: - print("error occured!"+str(gen_iter)+"responce {e}") - decoded = error_output - try: - hallucination_response = client.chat.completions.create( - model = args.model_name, - messages = hallucination_prompt, - max_tokens=256, - top_p=1, - temperature = 1, - ) - hallucination_decoded=hallucination_response.choices[0].message.content - except openai.APIStatusError as e: - print("error occured!"+str(gen_iter)+"hallucination_responce {e}") - hallucination_decoded = error_output - if args.dataset_name == 'tydiqa' or args.dataset_name == 'tydiqa': - try: - truth_response=client.chat.completions.create( - model = args.model_name, - messages = truth_prompt, - max_tokens=256, - top_p=1, - temperature=1 - ) - truth_decoded=truth_response.choices[0].message.content - except openai.APIStatusError as e: - print("error occured!"+str(gen_iter)+"truth_responce {e}") - truth_decoded =error_output - - - - else: - response = client.chat.completions.create( - model = args.model_name, - messages = prompt, - max_tokens=256, - n=1, - # best_of=1, - top_p=0.5, - temperature = 0.5, - ) - - hallucination_response = client.chat.completions.create( - model = args.model_name, - messages = hallucination_prompt, - max_tokens=256, - n=1, - # best_of=1, - top_p=0.5, - temperature = 0.5, - ) - if args.dataset_name == 'tydiqa' or args.dataset_name == 'tydiqa': - truth_response=client.chat.completions.create( - model = args.model_name, - messages = truth_prompt, - top_p=0.5, - temperature = 0.5, - ) - truth_decoded=truth_response.choices[0].message.content - decoded=response.choices[0].message.content - hallucination_decoded=hallucination_response.choices[0].message.content - time.sleep(40) - - - # decoded = tokenizer.decode(generated[0, prompt.shape[-1]:], - # skip_special_tokens=True) - if args.dataset_name == 'tqa' or args.dataset_name == 'triviaqa': - # corner case. 
- if 'Answer the question concisely' in decoded: - decoded = decoded.split('Answer the question concisely')[0] - if 'Answer the question concisely' in hallucination_decoded: - hallucination_decoded = hallucination_decoded.split('Answer the question concisely')[0] - if args.dataset_name == 'coqa': - if 'Q:' in decoded: - decoded = decoded.split('Q:')[0] - if 'Q:' in hallucination_decoded: - hallucination_decoded = hallucination_decoded.split('Q:')[0] - answers[gen_iter] = decoded - hallucinations[gen_iter]=hallucination_decoded - if args.dataset_name == 'tydiqa' or args.dataset_name == 'tydiqa': - truths[gen_iter]=truth_decoded - - - if args.dataset_name == 'coqa': - truths=[dataset[i]['answer']]+dataset[i]['additional_answers'] - truths=truths[:args.num_gene] - elif args.dataset_name == 'tqa': - truths=[dataset[i]['best_answer']]+dataset[i]['correct_answers'] - truths=truths[:args.num_gene] - else: - assert 'Not supported dataset!' - - print('sample: ', i) - if args.most_likely: - info = 'most_likely_' - else: - info = 'batch_generations_' - print("Saving answers") - np.save(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy', - answers) - print("Saving hallucinations") - np.save(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_hallucinations_index_{i}.npy', - hallucinations) - print("Saving truths") - np.save(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/truths/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_truths_index_{i}.npy', - truths) - - else: - tokenizer = llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True) - model = llama_iti.LlamaForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True, torch_dtype=torch.float16, - device_map="auto").cuda() - - begin_index = 0 - if args.dataset_name == 'tydiqa': - end_index = len(used_indices) - else: - end_index = len(dataset) - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/') - - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers') - - if not os.path.exists(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations'): - os.mkdir(f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations') - - period_token_id = [tokenizer(_)['input_ids'][-1] for _ in ['\n']] - period_token_id += [tokenizer.eos_token_id] - - for i in range(begin_index, end_index): - answers = [None] * args.num_gene - hallucinations= [None] * args.num_gene - if args.dataset_name == 'tydiqa': - question = dataset[int(used_indices[i])]['question'] - prompt = tokenizer( - "Concisely answer the following question based on the information in the given passage: \n" + \ - " Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:", - return_tensors='pt').input_ids.cuda() - hallucination_prompt=tokenizer( - get_hal_prompt(dataset[int(used_indices[i])]['context'],question,instruction), return_tensors='pt' - ).input_ids.cuda() - elif args.dataset_name == 'coqa': - prompt = tokenizer( - dataset[i]['prompt'], return_tensors='pt').input_ids.cuda() - # hallucination_prompt=get_hal_prompt("None",dataset[i]['prompt'],instruction) - 
hallucination_prompt=tokenizer( - get_hal_prompt("None",dataset[i]['prompt'],instruction) , return_tensors='pt' - ).input_ids.cuda() - else: - question = dataset[i]['question'] - prompt = tokenizer(f"Answer the question concisely. Q: {question}" + " A:", return_tensors='pt').input_ids.cuda() - # hallucination_prompt=get_hal_prompt("None",question,instruction) - hallucination_prompt=tokenizer( - get_hal_prompt("None",question,instruction), return_tensors='pt' - ).input_ids.cuda() - for gen_iter in range(args.num_gene): - if args.most_likely: - generated = model.generate(prompt, - num_beams=5, - num_return_sequences=1, - do_sample=False, - max_new_tokens=128, - ) - hallucination_generated=model.generate(hallucination_prompt, - num_beams=5, - num_return_sequences=1, - do_sample=False, - max_new_tokens=128, - ) - else: - generated = model.generate(prompt, - do_sample=True, - num_return_sequences=1, - num_beams=1, - max_new_tokens=128, - temperature=0.5, - top_p=1.0) - hallucination_generated=model.generate(hallucination_prompt, - do_sample=True, - num_return_sequences=1, - num_beams=1, - max_new_tokens=128, - temperature=0.5, - top_p=1.0) - - decoded = tokenizer.decode(generated[0, prompt.shape[-1]:], - skip_special_tokens=True) - hallucination_decoded=tokenizer.decode(hallucination_generated[0, prompt.shape[-1]:], - skip_special_tokens=True) - if args.dataset_name == 'tqa' or args.dataset_name == 'triviaqa': - # corner case. - if 'Answer the question concisely' in decoded: - decoded = decoded.split('Answer the question concisely')[0] - if 'Answer the question concisely' in hallucination_decoded: - hallucination_decoded = hallucination_decoded.split('Answer the question concisely')[0] - if args.dataset_name == 'coqa': - if 'Q:' in decoded: - decoded = decoded.split('Q:')[0] - if 'Q:' in hallucination_decoded: - hallucination_decoded = hallucination_decoded.split('Q:')[0] - answers[gen_iter] = decoded - hallucinations[gen_iter]=hallucination_decoded - - - print('sample: ', i) - if args.most_likely: - info = 'most_likely_' - else: - info = 'batch_generations_' - print("Saving answers") - np.save(f'./save_for_eval/{args.dataset_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy', - answers) - print("Saving hallucinations") - np.save(f'./save_for_eval/{args.dataset_name}_hal_det/hallucinations/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_hallucinations_index_{i}.npy', - hallucinations) - - - # get the split and label (true or false) of the unlabeled data and the test data. 
- - - - -if __name__ == '__main__': - seed_everything(42) - main() \ No newline at end of file diff --git a/hal_gt.py b/hal_gt.py deleted file mode 100644 index 2597265..0000000 --- a/hal_gt.py +++ /dev/null @@ -1,268 +0,0 @@ -import os -import torch -import torch.nn.functional as F -import evaluate -from datasets import load_metric -from datasets import load_dataset -import datasets -from tqdm import tqdm -import numpy as np -import pickle -import llama_iti -import pickle -import argparse -import matplotlib.pyplot as plt -from pprint import pprint -from baukit import Trace, TraceDict -from metric_utils import get_measures, print_measures -import re -from torch.autograd import Variable -from bleurt_pytorch import BleurtConfig, BleurtForSequenceClassification, BleurtTokenizer - - - -def seed_everything(seed: int): - import random, os - import numpy as np - import torch - - random.seed(seed) - os.environ['PYTHONHASHSEED'] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = True - -HF_NAMES = { - 'llama_7B': 'baffo32/decapoda-research-llama-7B-hf', - 'honest_llama_7B': 'validation/results_dump/llama_7B_seed_42_top_48_heads_alpha_15', - 'alpaca_7B': 'circulus/alpaca-7b', - 'vicuna_7B': 'AlekseyKorshuk/vicuna-7b', - 'llama2_chat_7B': 'models/Llama-2-7b-chat-hf', - 'llama2_chat_13B': 'models/Llama-2-13b-chat-hf', - 'llama2_chat_70B': 'meta-llama/Llama-2-70b-chat-hf', -} - - -def main(): - - - parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='llama2_chat_7B') - parser.add_argument('--model_name', type=str, default='moonshot-v1-8k') - parser.add_argument('--dataset_name', type=str, default='tqa') - parser.add_argument('--num_gene', type=int, default=1) - parser.add_argument('--use_api', type=bool, default=False) - parser.add_argument('--most_likely', type=bool, default=False) - parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data') - parser.add_argument("--instruction", type=str, default=None, help='local directory of instruction file.') - parser.add_argument('--use_rouge', type=bool, default=True) - parser.add_argument('--thres_gt', type=float, default=0.5) - - # parser.add_argument('--model_name', type=str, default='llama2_chat_7B') - # parser.add_argument('--dataset_name', type=str, default='triviaqa') - # parser.add_argument('--num_gene', type=int, default=1) - # parser.add_argument('--gene', type=int, default=0) - # parser.add_argument('--generate_gt', type=int, default=0) - # parser.add_argument('--use_rouge', type=int, default=0) - # parser.add_argument('--weighted_svd', type=int, default=0) - # parser.add_argument('--feat_loc_svd', type=int, default=0) - # parser.add_argument('--wild_ratio', type=float, default=0.75) - # parser.add_argument('--thres_gt', type=float, default=0.5) - # parser.add_argument('--most_likely', type=int, default=0) - - # parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data') - args = parser.parse_args() - - MODEL = HF_NAMES[args.model] if not args.model_dir else args.model_dir - - - - - if args.dataset_name == "tqa": - dataset = load_dataset("truthful_qa", 'generation')['validation'] - elif args.dataset_name == 'triviaqa': - dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation") - id_mem = set() - - def remove_dups(batch): - if batch['question_id'][0] in id_mem: - return {_: [] for _ in batch.keys()} - 
id_mem.add(batch['question_id'][0]) - return batch - - dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False) - elif args.dataset_name == 'tydiqa': - dataset = datasets.load_dataset("tydiqa", "secondary_task", split="train") - used_indices = [] - for i in range(len(dataset)): - if 'english' in dataset[i]['id']: - used_indices.append(i) - elif args.dataset_name == 'coqa': - import json - import pandas as pd - from datasets import Dataset - - def _save_dataset(): - # https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - save_path = f'./coqa_dataset' - if not os.path.exists(save_path): - # https://downloads.cs.stanford.edu/nlp/data/coqa/coqa-dev-v1.0.json - with open(f'./coqa-dev-v1.0.json', 'r') as infile: - data = json.load(infile)['data'] - - dataset = {} - - dataset['story'] = [] - dataset['question'] = [] - dataset['answer'] = [] - dataset['additional_answers'] = [] - dataset['id'] = [] - - for sample_id, sample in enumerate(data): - story = sample['story'] - questions = sample['questions'] - answers = sample['answers'] - additional_answers = sample['additional_answers'] - for question_index, question in enumerate(questions): - dataset['story'].append(story) - dataset['question'].append(question['input_text']) - dataset['answer'].append({ - 'text': answers[question_index]['input_text'], - 'answer_start': answers[question_index]['span_start'] - }) - dataset['id'].append(sample['id'] + '_' + str(question_index)) - additional_answers_list = [] - - for i in range(3): - additional_answers_list.append(additional_answers[str(i)][question_index]['input_text']) - - dataset['additional_answers'].append(additional_answers_list) - story = story + ' Q: ' + question['input_text'] + ' A: ' + answers[question_index]['input_text'] - if not story[-1] == '.': - story = story + '.' 
- - dataset_df = pd.DataFrame.from_dict(dataset) - - dataset = Dataset.from_pandas(dataset_df) - - dataset.save_to_disk(save_path) - return save_path - - # dataset = datasets.load_from_disk(_save_dataset()) - def get_dataset(tokenizer, split='validation'): - # from https://github.com/lorenzkuhn/semantic_uncertainty/blob/main/code/parse_coqa.py - dataset = datasets.load_from_disk(_save_dataset()) - id_to_question_mapping = dict(zip(dataset['id'], dataset['question'])) - - def encode_coqa(example): - example['answer'] = [example['answer']['text']] + example['additional_answers'] - example['prompt'] = prompt = example['story'] + ' Q: ' + example['question'] + ' A:' - return tokenizer(prompt, truncation=False, padding=False) - - dataset = dataset.map(encode_coqa, batched=False, load_from_cache_file=False) - dataset.set_format(type='torch', columns=['input_ids', 'attention_mask'], output_all_columns=True) - return dataset - - dataset = get_dataset(llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)) - else: - raise ValueError("Invalid dataset name") - - model = BleurtForSequenceClassification.from_pretrained('lucadiliello/BLEURT-20').cuda() - tokenizer = BleurtTokenizer.from_pretrained('lucadiliello/BLEURT-20') - model.eval() - - - # elif args.generate_gt: - # from bleurt_pytorch import BleurtConfig, BleurtForSequenceClassification, BleurtTokenizer - - - - rouge = evaluate.load('rouge') - gts = np.zeros(0) - if args.dataset_name == 'tydiqa': - length = len(used_indices) - else: - length = len(dataset) - for i in range(length): - if args.dataset_name == 'tqa': - best_answer = dataset[i]['best_answer'] - correct_answer = dataset[i]['correct_answers'] - all_answers = [best_answer] + correct_answer - elif args.dataset_name == 'triviaqa': - all_answers = dataset[i]['answer']['aliases'] - elif args.dataset_name == 'coqa': - all_answers = dataset[i]['answer'] - elif args.dataset_name == 'tydiqa': - all_answers = dataset[int(used_indices[i])]['answers']['text'] - - if args.most_likely: - answers = np.load( - f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - else: - answers = np.load( - f'./save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/batch_generations_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') - # get the gt. 
- if args.use_rouge: - - predictions = answers - all_results = np.zeros((len(all_answers), len(predictions))) - all_results1 = np.zeros((len(all_answers), len(predictions))) - all_results2 = np.zeros((len(all_answers), len(predictions))) - for anw in range(len(all_answers)): - results = rouge.compute(predictions=predictions, - references=[all_answers[anw]] * len(predictions), - use_aggregator=False) - all_results[anw] = results['rougeL'] - all_results1[anw] = results['rouge1'] - all_results2[anw] = results['rouge2'] - - # breakpoint() - gts = np.concatenate([gts, np.max(all_results, axis=0)], 0) - - if i % 50 == 0: - print("samples passed: ", i) - else: - - predictions = answers - all_results = np.zeros((len(all_answers), len(predictions))) - with torch.no_grad(): - for anw in range(len(all_answers)): - inputs = tokenizer(predictions.tolist(), [all_answers[anw]] * len(predictions), - padding='longest', return_tensors='pt') - for key in list(inputs.keys()): - inputs[key] = inputs[key].cuda() - res = np.asarray(model(**inputs).logits.flatten().tolist()) - all_results[anw] = res - gts = np.concatenate([gts, np.max(all_results, axis=0)], 0) - if i % 10 == 0: - print("samples passed: ", i) - # breakpoint() - if args.most_likely: - if args.use_rouge: - np.save(f'./ml_{args.dataset_name}_{args.model_name}_rouge_score.npy', gts) - else: - np.save(f'./ml_{args.dataset_name}_{args.model_name}_bleurt_score.npy', gts) - else: - if args.use_rouge: - np.save(f'./bg_{args.dataset_name}_{args.model_name}_rouge_score.npy', gts) - else: - np.save(f'./bg_{args.dataset_name}_{args.model_name}_bleurt_score.npy', gts) - - - - - - - - - - - - - - -if __name__ == '__main__': - seed_everything(42) - main() \ No newline at end of file diff --git a/jailbreak_llama.py b/jailbreak_llama.py index e01e782..65f21db 100644 --- a/jailbreak_llama.py +++ b/jailbreak_llama.py @@ -86,20 +86,19 @@ def main(): device_map="auto").cuda() HEADS = [f"model.layers.{i}.self_attn.head_out" for i in range(model.config.num_hidden_layers)] MLPS = [f"model.layers.{i}.mlp" for i in range(model.config.num_hidden_layers)] - # firstly get the embeddings of the generated question and answers. 
- # embed_generated = [] + benign_embed_generated =[] - benign_embed_generated_loc1 =[] + benign_embed_generated_loc1 =[] benign_embed_generated_loc2 =[] - adverse_embed_generated=[] - adverse_embed_generated_loc1=[] - adverse_embed_generated_loc2=[] + adverse_embed_generated=[] #layer-wise representations + adverse_embed_generated_loc1=[] #mlp-wise representations + adverse_embed_generated_loc2=[] #head-wise representations with open('benign.json') as f: benign_answers = json.load(f) - length = int(len(dataset)*0.45) + length = int(len(dataset)*0.55) for i in tqdm(range(length)): question = dataset[i]['query'] adversary = dataset[i]['target'] @@ -156,8 +155,7 @@ def main(): np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_benign_embeddings_mlp_wise.npy', benign_embed_generated_loc1) np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_adverse_embeddings_head_wise.npy', adverse_embed_generated_loc2) np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_adverse_embeddings_mlp_wise.npy', adverse_embed_generated_loc1) - # np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_gene_embeddings_t_head_wise.npy', embed_generated_t_loc2) - # np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_jailbreak/' + f'{args.model_name}_gene_embeddings_t_mlp_wise.npy', embed_generated_t_loc1) + diff --git a/llm_layers.py b/llm_layers.py new file mode 100644 index 0000000..cdd9fd6 --- /dev/null +++ b/llm_layers.py @@ -0,0 +1,229 @@ +import torch +from torch.nn import functional as F +from torch import nn +from transformers import PreTrainedModel +from torch import Tensor +import numpy as np +from typing import Optional, Tuple +from cache_utils import Cache +from transformers.activations import ACT2FN + +class LlamaDecoderLayerWrapper(nn.Module): + def __init__(self, llama_decoder_layer, tsv_layer, model_name='llama3.1-8B'): + super().__init__() + self.llama_decoder_layer = llama_decoder_layer + self.tsv_layer = tsv_layer # Instance of ICVLayer + self.model_name = model_name + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + **kwargs, + )-> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + # Save original residual state + residual = hidden_states + + # Forward pass through the input layer norm + hidden_states = self.llama_decoder_layer.input_layernorm(hidden_states) + + + if self.model_name == 'qwen2.5-7B': + hidden_states, self_attn_weights, present_key_value = self.llama_decoder_layer.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + + else: + hidden_states, self_attn_weights, present_key_value = self.llama_decoder_layer.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, 
+ position_embeddings=position_embeddings, + **kwargs, + ) + + # Add residual + steering vector after self-attention + hidden_states = residual.to(hidden_states.device) + hidden_states + + + # Save residual state for the MLP + residual = hidden_states + + # Forward pass through the post-attention layer norm and MLP + hidden_states = self.llama_decoder_layer.post_attention_layernorm(hidden_states) + hidden_states = self.llama_decoder_layer.mlp(hidden_states) + + # Add residual + steering vector after MLP + hidden_states = residual + hidden_states + hidden_states = self.tsv_layer(hidden_states) # Add steering vector + + # Return the outputs + outputs = (hidden_states,) + if output_attentions: + outputs += (self_attn_weights,) + if use_cache: + outputs += (present_key_value,) + + return outputs + +class SVLayer(nn.Module): + + def __init__(self, sv, lam): + super(SVLayer, self).__init__() + self.sv = sv + self.lam = lam + + def forward(self, x): + if self.tv is not None: + + x = x.half() + y = self.lam[0] * self.sv.repeat(1,x.shape[1],1) + y = y.to(x.device) + x = x.half() + y + + return x.half() + + else: + + return x.half() + + +def get_nested_attr(obj, attr_path): + attrs = attr_path.split(".") + for attr in attrs: + obj = getattr(obj, attr) + return obj + + +def set_nested_attr(obj, attr_path, value): + attrs = attr_path.split(".") + parent = get_nested_attr(obj, ".".join(attrs[:-1])) + setattr(parent, attrs[-1], value) + + +def find_longest_modulelist(model, path=""): + """ + Recursively find the longest nn.ModuleList in a PyTorch model. + Args: + model: PyTorch model. + path: Current path in the model (used for recursion). + Returns: + Tuple with path and length of the longest nn.ModuleList found. + """ + longest_path = path + longest_len = 0 + + for name, child in model.named_children(): + if isinstance(child, nn.ModuleList) and len(child) > longest_len: + longest_len = len(child) + longest_path = f"{path}.{name}" if path else name + + # Recursively check the child's children + child_path, child_len = find_longest_modulelist(child, f"{path}.{name}" if path else name) + if child_len > longest_len: + longest_len = child_len + longest_path = child_path + + return longest_path, longest_len + + +def find_module(block, keywords): + """ + Try to find a module in a transformer block. + Args: + block: Transformer block (nn.Module). + keywords: List of possible module names (str). + Returns: + The found module if found, else None. 
+ """ + + for name, module in block.named_modules(): + if any(keyword in name for keyword in keywords): + return module + submodule_names = [name for name, _ in block.named_modules()] + raise ValueError(f"Could not find keywords {keywords} in: {submodule_names}") + + +def get_embedding_layer(model: PreTrainedModel): + + keywords = ["emb", "wte"] + return find_module(model, keywords) + + +def get_lm_head(model: PreTrainedModel): + keywords = ["lm_head", "embed_out"] + return find_module(model, keywords) + + +def get_lm_pipeline(model: PreTrainedModel): + model_class = model.__class__.__name__ + + if model_class == "LlamaForCausalLM": + return nn.Sequential(model.model.norm, model.lm_head) + elif model_class == "RWForCausalLM": + return nn.Sequential(model.transformer.ln_f, model.lm_head) + elif model_class == "GPTNeoForCausalLM": + return nn.Sequential(model.transformer.ln_f, model.lm_head) + elif model_class == "GPTNeoXForCausalLM": + return nn.Sequential(model.gpt_neox.final_layer_norm, model.embed_out) + + # TODO: make the default case more robust + return get_lm_head(model) + + +def get_layers_path(model: PreTrainedModel): + longest_path, longest_len = find_longest_modulelist(model) + return longest_path + + +def get_layers(model: PreTrainedModel): + longest_path = get_layers_path(model) + return get_nested_attr(model, longest_path) + +def get_mlp_layers(model: PreTrainedModel): + layers = get_layers(model) + mlp_keywords = ["mlp", "feedforward", "ffn"] + mlp_layers = [find_module(layer, mlp_keywords) for layer in layers] + return mlp_layers + +def add_sv_layers(model: PreTrainedModel, tsv: Tensor, alpha: list, args): + layers = get_layers(model) + mlp_keywords = ["mlp", "feedforward", "ffn"] + attn_keywords = ["self_attn"] + + assert len(tsv) == len(layers) + if args.component == 'mlp': + for i, layer in enumerate(layers): + if i == args.str_layer: + original_mlp = find_module(layer, mlp_keywords) + layer.mlp = nn.Sequential(original_mlp, SVLayer(tsv[i], alpha)) + + elif args.component == 'attn': + for i, layer in enumerate(layers): + if i == args.str_layer: + original_attn = find_module(layer, attn_keywords) + layer.self_attn = nn.Sequential(original_attn, SVLayer(tsv[i], alpha)) + + elif args.component == 'res': + + for i, layer in enumerate(layers): + if i == args.str_layer: + decoder_layer = layers[i] + layers[i] = LlamaDecoderLayerWrapper(decoder_layer, SVLayer(tsv[i], alpha), args.model_name) \ No newline at end of file diff --git a/steer_vector.py b/steer_vector.py new file mode 100644 index 0000000..37d7a0a --- /dev/null +++ b/steer_vector.py @@ -0,0 +1,767 @@ +import os +import torch +import torch.nn as nn +from datasets import load_dataset +from tqdm import tqdm +import numpy as np +import argparse +from train_utils import get_last_non_padded_token_rep, compute_ot_loss_cos, update_centroids_ema, update_centroids_ema_hard, get_ex_data, collate_fn +from transformers import AutoTokenizer, AutoModelForCausalLM +from llm_layers import add_sv_layers +from sklearn.metrics import roc_auc_score +from torch.cuda.amp import autocast, GradScaler +import torch.nn.functional as F +from sinkhorn_knopp import SinkhornKnopp_imb +import logging + + +def seed_everything(seed: int): + import random, os + import numpy as np + import torch + + random.seed(seed) + os.environ['PYTHONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = True + + +def train_model(model, 
                optimizer, device, prompts, labels, args):
+    """
+    Two-phase training as described in the paper:
+    - Phase 1: initial training on the human-annotated exemplars (Eqs. (3)(4)(5)(6))
+    - Phase 2: self-training on pseudo-labeled data selected via OT + Sinkhorn (Sec. 3.3, pseudo-labeling)
+
+    Arguments:
+    - model: frozen LLM with the TSV layers already inserted (only the TSV is trained)
+    - optimizer: AdamW over the TSV parameters only
+    - device: cuda
+    - prompts: [test_prompts, train_prompts, exemplar_prompts]
+    - labels: the matching labels [test_labels, train_labels, exemplar_labels]
+    - args: hyperparameters (learning rate, epoch counts, Sinkhorn settings, etc.)
+    """
+
+    layer_number = -1  # use the last layer's hidden states (-1 as the index)
+
+    # ========= Logging & result directory =========
+    dir_name = f"TSV_{args.model_name}_{args.dataset_name}/exemplar_num_{args.num_exemplars}_num_selected_data_{args.num_selected_data}/{args.component}/{args.str_layer}/{args.lam}"
+    log_dir = f"./{dir_name}/"
+    log_file = os.path.join(log_dir, "log.txt")
+    os.makedirs(dir_name, exist_ok=True)
+
+    logging.basicConfig(
+        filename=log_file,
+        filemode="w",
+        level=logging.INFO,
+        format="%(asctime)s - %(levelname)s - %(message)s",
+    )
+
+    logging.info("Starting training")
+    logging.info(
+        f"Training parameters: few_shot_size={args.num_exemplars}, "
+        f"num_selected_data={args.num_selected_data}, "
+        f"component={args.component}, str_layer={args.str_layer}"
+    )
+
+    # Unpack the data: test / wild train / exemplar
+    test_prompts, train_prompts, exemplar_prompts = prompts[0], prompts[1], prompts[2]
+    test_labels, train_labels, exemplar_labels = labels[0], labels[1], labels[2]
+
+    batch_size = args.batch_size
+    losses = []
+    best_test_auroc = -1
+
+    scaler = GradScaler()  # gradient scaler for mixed-precision training
+
+    num_exemplars = args.num_exemplars
+
+    # ========= Sinkhorn OT setup (used in the pseudo-labeling phase) =========
+    args.num_iters_sk = 3    # number of Sinkhorn iterations (Eqs. (8)(9) in the paper)
+    args.epsilon_sk = 0.05   # entropic regularization ε for OT (also 0.05 in the paper)
+
+    # Estimate the class prior w (truthful / hallucinated) from the exemplar labels
+    # ex_hallu = P(hallucinated), ex_true = P(truthful)
+    ex_hallu = (num_exemplars - exemplar_labels[:num_exemplars].sum()) / num_exemplars
+    ex_true = (exemplar_labels[:num_exemplars].sum()) / num_exemplars
+    cls_dist = torch.tensor([ex_hallu, ex_true]).float().cuda()
+    cls_dist = cls_dist.view(-1, 1)  # shape [2, 1]
+
+    # Instantiate Sinkhorn with class-marginal constraints (the OT problem from the paper)
+    sinkhorn = SinkhornKnopp_imb(args, cls_dist)
+
+    # ========= Initialize the two class prototypes μ_c =========
+    # centroids are the paper's μ_truthful and μ_hallucinated on the unit sphere
+    centroids = torch.randn((2, model.config.hidden_size)).half().cuda()
+    centroids = F.normalize(centroids, p=2, dim=1)
+
+    # Collate the exemplar prompts/labels into one padded batch
+    exemplar_prompts_, exemplar_labels_ = exemplar_prompts, exemplar_labels
+    exemplar_prompts, exemplar_labels = collate_fn(exemplar_prompts, exemplar_labels)
+
+    # ========= Phase 1: Initial training on exemplars =========
+    num_epochs = args.init_num_epochs
+
+    for epoch in range(num_epochs):
+        running_loss = 0.0
+        total = 0
+        num_samples = num_exemplars
+
+        # Train batch-wise on the exemplars (D_E holds only human-annotated data)
+        for batch_start in tqdm(
+            range(0, num_samples, batch_size),
+            desc=f"Epoch {epoch+1}/{num_epochs} Batches",
+            leave=False,
+        ):
+            batch_prompts = exemplar_prompts[batch_start: batch_start + batch_size]
+            batch_labels = exemplar_labels[batch_start: batch_start + batch_size]
+
+            # attention_mask: 1 = valid token, 0 = padding
+            attention_mask = (batch_prompts != 0).half()
+
+            batch_prompts = batch_prompts.to(device)
+            batch_labels = batch_labels.to(device)
+            attention_mask = attention_mask.to(batch_prompts.device)
+
+            # ======= Forward pass: take the last layer's last non-padding token representation r_v =======
+            with autocast(dtype=torch.float16):
+                output = model(
+                    batch_prompts.squeeze(),
+                    attention_mask=attention_mask.squeeze(),
+                    output_hidden_states=True,
+                )
+
+                hidden_states = output.hidden_states  # tuple of length L+1
+                hidden_states = torch.stack(hidden_states, dim=0).squeeze()
+                last_layer_hidden_state = hidden_states[layer_number]  # [B, T, H]
+
+                # r_v in the paper: hidden state of the last non-padding token (already TSV-steered)
+                last_token_rep = get_last_non_padded_token_rep(
+                    last_layer_hidden_state, attention_mask.squeeze()
+                )
+
+                # Turn the labels into one-hot vectors (2 classes: hallucinated / truthful)
+                batch_labels_oh = torch.nn.functional.one_hot(
+                    batch_labels, num_classes=2
+                )
+
+                # compute_ot_loss_cos: the loss of Eq. (5) in its OT variant:
+                # - a classification-style loss from the cosine distance to the centroids
+                # - temperature-scaled internally with args.cos_temp
+                ot_loss, similarities = compute_ot_loss_cos(
+                    last_token_rep, centroids, batch_labels_oh, batch_size, args
+                )
+
+                loss = ot_loss
+                total += batch_labels.size(0)
+
+                # ====== Update the class prototypes μ_c via EMA, Eq. (6) in the paper ======
+                with torch.no_grad():
+                    centroids = update_centroids_ema_hard(
+                        centroids, last_token_rep, batch_labels_oh, args
+                    )
+
+            # ====== Backpropagate through the TSV only; the LLM itself stays frozen ======
+            scaler.scale(loss).backward()
+            scaler.step(optimizer)
+            scaler.update()
+            optimizer.zero_grad()
+
+            running_loss += loss.item() * batch_labels.size(0)
+
+        # Average loss over the epoch
+        epoch_loss = running_loss / total
+
+        # ====== Evaluate on the test set and record the AUROC ======
+        if (epoch + 1) % 1 == 0:
+            test_labels_ = test_labels
+            test_predictions, test_labels_combined = test_model(
+                model, centroids, test_prompts, test_labels_, device, batch_size, layer_number
+            )
+
+            test_auroc = roc_auc_score(
+                test_labels_combined.cpu().numpy(), test_predictions.cpu().numpy()
+            )
+
+            print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}")
+            logging.info(f"Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}")
+            losses.append(epoch_loss)
+
+            if test_auroc > best_test_auroc:
+                best_test_auroc = test_auroc
+                best_test_epoch = epoch
+                print(f"Best test AUROC: {best_test_auroc:.4f}, at epoch: {best_test_epoch}")
+                logging.info(
+                    f"Best test AUROC: {best_test_auroc:.4f}, at epoch: {best_test_epoch}"
+                )
+
+            logging.info(
+                f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {epoch_loss:.4f}, "
+            )
+            logging.info(f"Test AUROC: {test_auroc:.4f}")
+            print(f"Epoch [{epoch+1}/{num_epochs}], Test AUROC: {test_auroc:.4f}")
+
+    # ========= Phase 2: pseudo-labeling + self-training =========
+    logging.info(f"SS Learning Starts")
+
+    # 1) get_ex_data: run TSV inference on the wild train split, solve the Sinkhorn OT,
+    #    and select high-confidence pseudo-labeled samples
+    with torch.no_grad():
+        selected_indices, selected_labels_soft = get_ex_data(
+            model,
+            train_prompts,           # samples from the wild split
+            train_labels,            # effectively unlabeled / dummy here; not used for selection
+            batch_size,
+            centroids,               # current prototypes
+            sinkhorn,                # OT + Sinkhorn object used to compute Q
+            args.num_selected_data,  # how many pseudo-labeled samples to select
+            cls_dist,
+            args,
+        )
+
+    num_samples = len(selected_indices) + args.num_exemplars
+
+    num_epochs = args.aug_num_epochs  # number of self-training epochs
+
+    exemplar_label = torch.tensor(exemplar_labels).cuda()
+
+    # 2) Build the augmented training set: pseudo-labeled samples + the original exemplars
+    selected_prompts = [train_prompts[i] for i in selected_indices]
+    selected_labels = selected_labels_soft  # already soft labels (the q produced by OT)
+
+    augmented_prompts = selected_prompts + exemplar_prompts_
+    exemplar_labels = torch.nn.functional.one_hot(
+        exemplar_label.to(torch.int64), num_classes=2
+    )
+    augmented_labels = torch.concat(
+        (selected_labels, torch.tensor(exemplar_labels).clone().cuda())
+    )
+
+    augmented_prompts_train = augmented_prompts
+    augmented_labels_label = augmented_labels
+
+    num_samples = len(augmented_prompts_train)
+
+    # 3) Train the TSV again on "pseudo-labels + exemplars" (self-training)
+    with autocast(dtype=torch.float16):
+        for epoch in range(num_epochs):
+            running_loss = 0.0
+            total = 0
+            all_labels = []
+
+            for batch_start in tqdm(
+                range(0, num_samples, batch_size),
+                desc=f"Epoch {epoch+1}/{num_epochs} Batches",
+                leave=False,
+            ):
+                batch_prompts = augmented_prompts_train[batch_start: batch_start + batch_size]
+                batch_labels = augmented_labels_label[batch_start: batch_start + batch_size]
+
+                batch_prompts, batch_labels = collate_fn(batch_prompts, batch_labels)
+
+                attention_mask = (batch_prompts != 0).half()
+
+                batch_prompts = batch_prompts.to(device)
+                batch_labels = batch_labels.to(device)
+                attention_mask = attention_mask.to(batch_prompts.device)
+
+                output = model(
+                    batch_prompts.squeeze(),
+                    attention_mask=attention_mask.squeeze(),
+                    output_hidden_states=True,
+                )
+
+                hidden_states = output.hidden_states
+                hidden_states = torch.stack(hidden_states, dim=0).squeeze()
+                last_layer_hidden_state = hidden_states[layer_number]
+
+                last_token_rep = get_last_non_padded_token_rep(
+                    last_layer_hidden_state, attention_mask.squeeze()
+                )
+
+                # Here compute_ot_loss_cos receives soft labels (the q obtained from OT)
+                ot_loss, similarities = compute_ot_loss_cos(
+                    last_token_rep, centroids, batch_labels, batch_size, args
+                )
+
+                loss = ot_loss
+
+                with torch.no_grad():
+                    # In the self-training phase the prototypes are updated with the soft-label EMA
+                    centroids = update_centroids_ema(
+                        centroids, last_token_rep, batch_labels.half(), args
+                    )
+                all_labels.append(batch_labels.cpu())
+                total += batch_labels.size(0)
+
+                scaler.scale(loss).backward()
+                scaler.step(optimizer)
+                scaler.update()
+                optimizer.zero_grad()
+
+                running_loss += loss.item() * batch_labels.size(0)
+
+            epoch_loss = running_loss / total
+
+            # ====== Evaluate AUROC on the test set ======
+            with torch.no_grad():
+                all_labels = torch.cat(all_labels).numpy()
+                test_labels_ = test_labels
+
+                if epoch % 1 == 0:
+                    test_predictions, test_labels_combined = test_model(
+                        model,
+                        centroids,
+                        test_prompts,
+                        test_labels_,
+                        device,
+                        batch_size,
+                        layer_number,
+                    )
+                    test_auroc = roc_auc_score(test_labels_combined, test_predictions)
+
+                    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}")
+                    losses.append(epoch_loss)
+
+                    if test_auroc > best_test_auroc:
+                        best_test_auroc = test_auroc
+                        best_test_epoch = epoch + args.init_num_epochs
+                        print(
+                            f"Best test AUROC: {best_test_auroc:.4f}, at epoch: {best_test_epoch}"
+                        )
+                        logging.info(
+                            f"Best test AUROC: {best_test_auroc:.4f}, at epoch: {best_test_epoch}"
+                        )
+
+                    logging.info(
+                        f"Epoch [{epoch+1}/{num_epochs}], Train Loss: {epoch_loss:.4f}, "
+                    )
+                    logging.info(
+                        f"Best test AUROC: {best_test_auroc:.4f}, at epoch: {best_test_epoch}"
+                    )
+
+    return best_test_auroc
+
+
+
+def test_model(model, centroids, test_prompts, test_labels, device, batch_size, layer_number):
+    """
+    In terms of the paper, this:
+    - computes the TSV-steered representation r_v for the test set
+    - computes the cosine similarity to the two prototypes μ_c
+    - applies a softmax to obtain p(c = truthful | r_v)
+    - uses this probability for the AUROC evaluation
+    """
+    model.eval()
+    val_predictions = []      # stores p_truthful
+    val_labels_combined = []  # the corresponding ground-truth binary labels
+
+    num_val_samples = len(test_prompts)
+
+    with torch.no_grad():
+        with autocast(dtype=torch.float16):
+            for batch_start in range(0, num_val_samples, batch_size):
+                batch_prompts = test_prompts[batch_start:batch_start + batch_size]
+                batch_labels = test_labels[batch_start:batch_start + batch_size]
+                batch_prompts, batch_labels = collate_fn(batch_prompts, batch_labels)
+
+                attention_mask = (batch_prompts != 0).half().to(device)
+                batch_prompts = batch_prompts.to(device)
+                batch_labels = batch_labels.to(device)
+
+                # Forward pass; take the last non-padding token's hidden state as r_v
+                output = model(
+                    batch_prompts.squeeze(),
+                    attention_mask=attention_mask.squeeze(),
+                    output_hidden_states=True,
+                )
+                hidden_states = output.hidden_states
+                hidden_states = torch.stack(hidden_states, dim=0).squeeze()
+                last_layer_hidden_state = hidden_states[layer_number]
+                last_token_rep = get_last_non_padded_token_rep(
+                    last_layer_hidden_state, attention_mask.squeeze()
+                )
+
+                # Normalize onto the unit sphere; the prototypes are normalized as well
+                last_token_rep = F.normalize(last_token_rep, p=2, dim=-1)
+                centroids = F.normalize(centroids, p=2, dim=-1)
+
+                with autocast(dtype=torch.float16):
+                    similarities = torch.matmul(last_token_rep, centroids.T)  # [B, 2]
+
+                # similarity / temperature -> softmax -> take the "truthful" dimension as the probability
+                similarity_scores = torch.softmax(similarities / 0.1, dim=-1)
+                similarity_scores = similarity_scores[:, 1]  # index 1 = truthful
+
+                val_predictions.append(similarity_scores.cpu())
+                val_labels_combined.append(batch_labels.cpu())
+
+    val_predictions = torch.cat(val_predictions)
+    val_labels_combined = torch.cat(val_labels_combined)
+    return val_predictions, val_labels_combined
+
+
+
+HF_NAMES = {
+    'llama3.1-8B': 'meta-llama/Meta-Llama-3.1-8B',
+    'qwen2.5-7B': 'Qwen/Qwen2.5-7B'
+}
+
+def main():
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model_name', type=str, default='llama3.1-8B')
+    parser.add_argument('--model_prefix', type=str, default='', help='prefix of model name')
+    parser.add_argument('--num_gene', type=int, default=1)
+    parser.add_argument('--gene', type=int, default=0)
+    parser.add_argument('--generate_gt', type=int, default=0)
+    parser.add_argument('--dataset_name', type=str, default='tqa')
+    parser.add_argument('--device', type=int, default=0)
+    parser.add_argument('--wild_ratio', type=float, default=0.75)
+    parser.add_argument('--thres_gt', type=float, default=0.5)
+    parser.add_argument('--most_likely', type=int, default=0)
+    parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data')
+    parser.add_argument("--batch_size", type=int, default=128)
+    parser.add_argument("--cos_temp", type=float, default=0.1)
+    parser.add_argument("--ema_decay", type=float, default=0.99)
+    parser.add_argument("--lr", type=float, default=0.005)
+    parser.add_argument("--str_layer", type=int, default=9)
+    parser.add_argument("--component", type=str, default='res')
+    parser.add_argument("--lam", type=float, default=5)
+    parser.add_argument("--init_num_epochs", type=int, default=20)
+    parser.add_argument("--aug_num_epochs", type=int, default=20)
+    parser.add_argument("--num_exemplars", type=int, default=32)
+    parser.add_argument("--num_selected_data", type=int, default=128)
+    parser.add_argument("--cls_dist", type=str, default='proxy')
+    parser.add_argument("--optimizer", type=str, default='AdamW')
+    parser.add_argument("--num_iters_sk", type=int, default=3)
+    parser.add_argument("--epsilon_sk", type=float, default=0.05)
+
+    args = parser.parse_args()
+
+    model_name_or_path = HF_NAMES[args.model_prefix + args.model_name]
+
+    if args.dataset_name == "tqa":
+        dataset = load_dataset("truthful_qa", 'generation')['validation']
+
+    elif args.dataset_name == 'triviaqa':
+        dataset = load_dataset("trivia_qa", "rc.nocontext", split="validation")
+        id_mem = set()
+
+        def remove_dups(batch):
+            if batch['question_id'][0] in id_mem:
+                return {_: [] for _ in batch.keys()}
+            id_mem.add(batch['question_id'][0])
+            return batch
+
+        dataset = dataset.map(remove_dups, batch_size=1, batched=True, load_from_cache_file=False)
+
+
+    elif args.dataset_name == 'sciq':
+        dataset = load_dataset("allenai/sciq",
split="validation") + + elif args.dataset_name == 'nq_open': + dataset = load_dataset("google-research-datasets/nq_open", split="validation") + + + else: + raise ValueError("Invalid dataset name") + + + if args.gene: + + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, token = '') + model = AutoModelForCausalLM.from_pretrained(model_name_or_path, low_cpu_mem_usage=True, torch_dtype=torch.float16, device_map="auto", token = '') + device = torch.device("cuda") + all_decoded_answers = [] + begin_index = 0 + end_index = len(dataset) + + if not os.path.exists(f'./save_for_eval/{args.dataset_name}_hal_det/'): + os.mkdir(f'./save_for_eval/{args.dataset_name}_hal_det/') + + if not os.path.exists(f'./save_for_eval/{args.dataset_name}_hal_det/answers'): + os.mkdir(f'./save_for_eval/{args.dataset_name}_hal_det/answers') + + period_token_id = [tokenizer(_)['input_ids'][-1] for _ in ['\n']] + period_token_id += [tokenizer.eos_token_id] + + for i in range(begin_index, end_index): + answers = [None] * args.num_gene + answers_ = [None] * args.num_gene + + question = dataset[i]['question'] + prompt = tokenizer(f"Answer the question concisely. Q: {question}" + " A:", return_tensors='pt').input_ids.cuda() + + for gen_iter in range(args.num_gene): + if args.most_likely: + generated = model.generate(prompt, + num_beams=5, + num_return_sequences=1, + do_sample=False, + max_new_tokens=64, + ) + else: + generated = model.generate(prompt, + do_sample=True, + num_return_sequences=1, + num_beams=1, + max_new_tokens=64, + temperature=0.5, + top_p=1.0) + + + decoded = tokenizer.decode(generated[0, prompt.shape[-1]:], + skip_special_tokens=True) + # answers[gen_iter] = decoded + + # Cleaning + if '\nAnswer the question concisely.' in decoded: + print('#####error') + print(decoded.split('\nAnswer the question concisely.')[1]) + print('#####error') + decoded = decoded.split('\nAnswer the question concisely.')[0] + + if 'Answer the question concisely' in decoded: + print('#####error') + print(decoded.split('Answer the question concisely')[1]) + print('#####error') + decoded = decoded.split('Answer the question concisely')[0] + + if 'The answer to the question' in decoded: + print('#####error') + print(decoded.split('The answer to the question')[1]) + print('#####error') + decoded = decoded.split('The answer to the question')[0] + + if 'How to Write a Concise Statement' in decoded: + print('#####error') + print(decoded.split('How to Write a Concise Statement')[1]) + print('#####error') + decoded = decoded.split('How to Write a Concise Statement')[0] + + if 'Q:' in decoded: + print('#####error') + print(decoded.split('Q:')[1]) + print('#####error') + decoded = decoded.split('Q:')[0] + + if '\nYou are an AI assistant' in decoded: + print('#####error') + print(decoded.split('\nYou are an AI assistant')[1]) + print('#####error') + decoded = decoded.split('\nYou are an AI assistant')[0] + + if 'You are an AI assistant' in decoded: + print('#####error') + print(decoded.split('You are an AI assistant')[1]) + print('#####error') + decoded = decoded.split('You are an AI assistant')[0] + + if 'A:' in decoded: + print('#####error') + print(decoded.split('A:')[1]) + print('#####error') + decoded = decoded.split('A:')[0] + + if 'B:' in decoded: + print('#####error') + print(decoded.split('B:')[1]) + print('#####error') + decoded = decoded.split('B:')[0] + + if 'C:' in decoded: + print('#####error') + print(decoded.split('C:')[1]) + print('#####error') + decoded = decoded.split('C:')[0] + + if 'D:' in decoded: + 
print('#####error') + print(decoded.split('D:')[1]) + print('#####error') + decoded = decoded.split('D:')[0] + + print(f'Cleaned Answer: {decoded}') + answers[gen_iter] = decoded + + + + print('sample: ', i) + if args.most_likely: + info = 'most_likely_' + else: + info = 'batch_generations_' + + print("Saving answers") + print(decoded) + + np.save(f'./save_for_eval/{args.dataset_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy', + answers) + + elif args.generate_gt: + from bleurt_pytorch import BleurtForSequenceClassification, BleurtTokenizer + + model = BleurtForSequenceClassification.from_pretrained('lucadiliello/BLEURT-20').cuda() + tokenizer = BleurtTokenizer.from_pretrained('lucadiliello/BLEURT-20') + model.eval() + + gts = np.zeros(0) + length = len(dataset) + + for i in range(length): + + if args.dataset_name == 'tqa': + best_answer = dataset[i]['best_answer'] + correct_answer = dataset[i]['correct_answers'] + all_answers = [best_answer] + correct_answer + question = dataset[i]['question'] + + elif args.dataset_name == 'triviaqa': + all_answers = dataset[i]['answer']['aliases'] + + if args.most_likely: + # answers = np.load( + # f'./save_for_eval/{args.dataset_name}_hal_det/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') + answers = np.load( + f'./save_for_eval/{args.dataset_name}_hal_det/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') + + else: + answers = np.load( + f'./save_for_eval/{args.dataset_name}_hal_det/answers/batch_generations_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') + + # get the gt. + predictions = answers + all_results = np.zeros((len(all_answers), len(predictions))) + with torch.no_grad(): + for anw in range(len(all_answers)): + inputs = tokenizer(predictions.tolist(), [all_answers[anw]] * len(predictions), + padding='longest', return_tensors='pt') + for key in list(inputs.keys()): + inputs[key] = inputs[key].cuda() + res = np.asarray(model(**inputs).logits.flatten().tolist()) + all_results[anw] = res + gts = np.concatenate([gts, np.max(all_results, axis=0)], 0) + if i % 10 == 0: + print("samples passed: ", i) + + if args.most_likely: + # np.save(f'./ml_{args.dataset_name}_bleurt_score.npy', gts) + np.save(f'./ml_{args.dataset_name}_bleurt_score.npy', gts) + + else: + np.save(f'./bg_{args.dataset_name}_bleurt_score.npy', gts) + + + + else: + + device = torch.device("cuda") + model = AutoModelForCausalLM.from_pretrained(model_name_or_path, low_cpu_mem_usage=True, torch_dtype=torch.float16, device_map="auto", token = '') + tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, token = '') + + prompts = [] + qa_pairs = [] + categories = [] + + length = len(dataset) + + + for i in tqdm(range(length)): + + question = dataset[i]['question'] + if args.dataset_name == 'tqa': + categories.append(dataset[i]['category']) + + answers = np.load( + f'./save_for_eval/{args.dataset_name}_hal_det/answers/most_likely_hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy') + + + for anw in answers: + + prompt = tokenizer( + f"Answer the question concisely. 
Q: {question}" + " A:" + anw,
+                    return_tensors='pt').input_ids.cuda()
+
+                prompts.append(prompt)
+                qa_pairs.append({'Question': question, 'Answer': anw})
+
+        gts = np.load(f'./ml_{args.dataset_name}_bleurt_score.npy')
+
+
+        length = len(dataset)
+
+        if args.dataset_name == 'tqa' or args.dataset_name == 'triviaqa':
+            args.thres_gt = 0.5
+
+        else:
+            args.thres_gt = 0.2
+
+        gt_label = np.asarray(gts > args.thres_gt, dtype=np.int32)
+
+        # index = np.random.permutation(length)
+
+        # exemplar_index = index[:args.num_exemplars]
+
+        # wild_q_indices = index[:int(args.wild_ratio * length)]
+
+        index = np.load(f'data_indices/data_index_{args.dataset_name}.npy')
+
+        exemplar_index = np.load(f'data_indices/exemplar_idx_{args.dataset_name}.npy')
+
+        wild_q_indices = index[:int(args.wild_ratio * length)]
+
+        wild_q_indices1 = wild_q_indices[:len(wild_q_indices) - 100]
+
+        args.num_exemplars = len(exemplar_index)
+
+        gt_label_test = []
+        gt_label_wild = []
+        gt_label_exemplar = []
+
+        test_prompts = []
+        train_prompts = []
+        exemplar_prompts = []
+
+
+        for i in range(length):
+            if i not in wild_q_indices:
+                gt_label_test.extend(gt_label[i: i+1])
+                test_prompts.extend(prompts[i:i+1])
+
+            elif i in exemplar_index:
+                gt_label_exemplar.extend(gt_label[i: i+1])
+                exemplar_prompts.extend(prompts[i:i+1])
+
+            elif i in wild_q_indices1:
+                gt_label_wild.extend(gt_label[i: i+1])
+                train_prompts.extend(prompts[i:i+1])
+
+        gt_label_test = np.asarray(gt_label_test)
+        gt_label_exemplar = np.asarray(gt_label_exemplar)
+        gt_label_wild = np.asarray(gt_label_wild)
+
+        labels = [gt_label_test, gt_label_wild, gt_label_exemplar]
+        prompts = [test_prompts, train_prompts, exemplar_prompts]
+
+        num_layers = model.config.num_hidden_layers
+        hidden_size = model.config.hidden_size
+
+        for param in model.parameters():
+            param.requires_grad = False
+
+        tsv = nn.ParameterList(
+            [nn.Parameter(torch.zeros(hidden_size), requires_grad=True) for _ in range(num_layers)])
+
+        tsv.to(device)
+
+        add_sv_layers(model, tsv, [args.lam], args)
+
+        optimizer = torch.optim.AdamW(list(tsv.parameters()), lr=args.lr)
+
+        train_model(model, optimizer, device, prompts, labels, args=args)
+
+if __name__ == '__main__':
+    seed_everything(42)
+    main()
\ No newline at end of file
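
For reference, a minimal self-contained sketch (not part of the patch) of the centroid-based scoring that test_model applies to the steered last-token representations: normalize onto the unit sphere, take the cosine similarity to the two class prototypes, apply a temperature-scaled softmax, and score AUROC on the "truthful" probability. Random tensors stand in for the model activations, and names such as toy_representations are illustrative only; shapes and the 0.1 temperature follow the script above.

import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score

# Toy stand-ins: B samples with hidden size H, two prototypes (index 0 = hallucinated, 1 = truthful).
B, H = 64, 4096
torch.manual_seed(0)
toy_representations = torch.randn(B, H)            # stand-in for the steered last-token states r_v
toy_labels = torch.tensor([0, 1] * (B // 2))       # stand-in ground-truth labels (both classes present)
toy_centroids = torch.randn(2, H)                  # stand-in prototypes mu_hallucinated, mu_truthful

# Project representations and prototypes onto the unit sphere, as in test_model.
reps = F.normalize(toy_representations, p=2, dim=-1)
cents = F.normalize(toy_centroids, p=2, dim=-1)

# Cosine similarity to each prototype, then temperature-scaled softmax (temperature 0.1).
similarities = reps @ cents.T                      # [B, 2]
p_truthful = torch.softmax(similarities / 0.1, dim=-1)[:, 1]

# AUROC of the truthful probability against the labels (random inputs give ~0.5).
print("AUROC:", roc_auc_score(toy_labels.numpy(), p_truthful.numpy()))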