Compare commits


1 Commit

Author: weixin_43297441 · SHA1: be4e5789d9 · Message: save branch · Date: 2025-03-05 16:56:01 +08:00
5 changed files with 183 additions and 257 deletions

View File

@@ -9,8 +9,6 @@ from tqdm import tqdm
import numpy as np
import pickle
# from utils import get_llama_activations_bau, tokenized_tqa, tokenized_tqa_gen, tokenized_tqa_gen_end_q
from utils import mahalanobis_distance
from scipy.io import savemat
import llama_iti
import pickle
import argparse
@@ -60,7 +58,7 @@ def main():
parser.add_argument('--num_gene', type=int, default=1)
parser.add_argument('--use_rouge', type=bool, default= False)
parser.add_argument('--weighted_svd', type=int, default=0)
parser.add_argument('--feat_loc_svd', type=int, default=1)
parser.add_argument('--feat_loc_svd', type=int, default=0)
parser.add_argument('--wild_ratio', type=float, default=0.75)
parser.add_argument('--thres_gt', type=float, default=0.5)
parser.add_argument('--most_likely', type=bool, default=True)
@@ -173,7 +171,6 @@ def main():
embed_generated = []
embed_generated_h =[]
embed_generated_t=[]
if args.dataset_name == 'tydiqa':
length = len(used_indices)
@@ -190,6 +187,10 @@ def main():
info = 'batch_generations_'
answers = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
truths= np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/truths/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_truths_index_{i}.npy')
hallucinations= np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_hallucinations_index_{i}.npy')
for anw in answers:
if args.dataset_name == 'tydiqa':
@@ -208,153 +209,135 @@ def main():
hidden_states = torch.stack(hidden_states, dim=0).squeeze()
hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :]
embed_generated.append(hidden_states)
embed_generated = np.asarray(np.stack(embed_generated), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_layer_wise.npy', embed_generated)
embed_generated = np.asarray(np.stack(embed_generated), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_layer_wise.npy', embed_generated)
embed_generated_t_loc2 = []
embed_generated_t_loc1 = []
embed_generated_h_loc2 = []
embed_generated_h_loc1 = []
embed_generated_loc2 = []
embed_generated_loc1 = []
for i in tqdm(range(length)):
if args.dataset_name == 'tydiqa':
question = dataset[int(used_indices[i])]['question']
else:
question = dataset[i]['question']
answers = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
truths= np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/truths/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_truths_index_{i}.npy')
hallucinations= np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/hallucinations/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_hallucinations_index_{i}.npy')
for anw in answers:
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + anw,
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + anw,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
for hal in hallucinations:
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + hal,
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + hal, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + hal,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
embed_generated_h_loc2.append(mlp_wise_hidden_states[:, -1, :])
embed_generated_h_loc1.append(head_wise_hidden_states[:, -1, :])
for tru in truths:
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + tru,
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + tru, return_tensors='pt').input_ids.cuda()
prompt = tokenizer(dataset[i]['prompt'] + tru, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + tru,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
hidden_states = model(prompt, output_hidden_states=True).hidden_states
hidden_states = torch.stack(hidden_states, dim=0).squeeze()
hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :]
embed_generated_t.append(hidden_states)
embed_generated_t = np.asarray(np.stack(embed_generated_t), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_layer_wise.npy', embed_generated_t)
for hal in hallucinations:
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:" + hal,
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + hal, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + hal,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
hidden_states = model(prompt, output_hidden_states=True).hidden_states
hidden_states = torch.stack(hidden_states, dim=0).squeeze()
hidden_states = hidden_states.detach().cpu().numpy()[:, -1, :]
embed_generated_h.append(hidden_states)
embed_generated_h = np.asarray(np.stack(embed_generated_h), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_layer_wise.npy', embed_generated_h)
embed_generated_loc2 = []
embed_generated_loc1 = []
for i in tqdm(range(length)):
if args.dataset_name == 'tydiqa':
question = dataset[int(used_indices[i])]['question']
else:
question = dataset[i]['question']
answers = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/answers/' + info + f'hal_det_{args.model_name}_{args.dataset_name}_answers_index_{i}.npy')
for anw in answers:
if args.dataset_name == 'tydiqa':
prompt = tokenizer(
"Concisely answer the following question based on the information in the given passage: \n" + \
" Passage: " + dataset[int(used_indices[i])]['context'] + " \n Q: " + question + " \n A:",
return_tensors='pt').input_ids.cuda()
elif args.dataset_name == 'coqa':
prompt = tokenizer(dataset[i]['prompt'] + anw, return_tensors='pt').input_ids.cuda()
else:
prompt = tokenizer(
f"Answer the question concisely. Q: {question}" + " A:" + anw,
return_tensors='pt').input_ids.cuda()
with torch.no_grad():
with TraceDict(model, HEADS + MLPS) as ret:
output = model(prompt, output_hidden_states=True)
head_wise_hidden_states = [ret[head].output.squeeze().detach().cpu() for head in HEADS]
head_wise_hidden_states = torch.stack(head_wise_hidden_states, dim=0).squeeze().numpy()
mlp_wise_hidden_states = [ret[mlp].output.squeeze().detach().cpu() for mlp in MLPS]
mlp_wise_hidden_states = torch.stack(mlp_wise_hidden_states, dim=0).squeeze().numpy()
embed_generated_loc2.append(mlp_wise_hidden_states[:, -1, :])
embed_generated_loc1.append(head_wise_hidden_states[:, -1, :])
embed_generated_loc2 = np.asarray(np.stack(embed_generated_loc2), dtype=np.float32)
embed_generated_loc1 = np.asarray(np.stack(embed_generated_loc1), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy', embed_generated_loc1)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_embeddings_mlp_wise.npy', embed_generated_loc2)
embed_generated_t_loc2.append(mlp_wise_hidden_states[:, -1, :])
embed_generated_t_loc1.append(head_wise_hidden_states[:, -1, :])
embed_generated_loc2 = np.asarray(np.stack(embed_generated_loc2), dtype=np.float32)
embed_generated_loc1 = np.asarray(np.stack(embed_generated_loc1), dtype=np.float32)
embed_generated_h_loc2 = np.asarray(np.stack(embed_generated_h_loc2), dtype=np.float32)
embed_generated_h_loc1 = np.asarray(np.stack(embed_generated_h_loc1), dtype=np.float32)
embed_generated_t_loc2 = np.asarray(np.stack(embed_generated_t_loc2), dtype=np.float32)
embed_generated_t_loc1 = np.asarray(np.stack(embed_generated_t_loc1), dtype=np.float32)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy', embed_generated_loc1)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_embeddings_mlp_wise.npy', embed_generated_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_head_wise.npy', embed_generated_h_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_mlp_wise.npy', embed_generated_h_loc1)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_head_wise.npy', embed_generated_t_loc2)
np.save(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_mlp_wise.npy', embed_generated_t_loc1)
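Each pass above runs the model once per generated answer and keeps only the last token's hidden state at every layer; the head-wise and MLP-wise variants do the same with the per-module outputs captured by baukit's TraceDict. A minimal sketch of the layer-wise pattern, assuming a Hugging Face causal LM (the function name is illustrative):

import torch

def last_token_layerwise_features(model, input_ids):
    # One forward pass; hidden_states is a tuple of (num_layers + 1)
    # tensors, each shaped [batch, seq_len, hidden_dim].
    with torch.no_grad():
        out = model(input_ids, output_hidden_states=True)
    stacked = torch.stack(out.hidden_states, dim=0).squeeze(1)  # [L+1, seq_len, d]
    # Keep only the final token's embedding at every layer: [L+1, d].
    return stacked[:, -1, :].detach().cpu().numpy()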
# get the split and label (true or false) of the unlabeled data and the test data.
if args.use_rouge:
gts = np.load(f'./ml_{args.dataset_name}_{args.model_name}_rouge_score.npy')
gts_bg = np.load(f'./bg_{args.dataset_name}_{args.model_name}_rouge_score.npy')
else:
gts = np.load(f'./ml_{args.dataset_name}_{args.model_name}_bleurt_score.npy')
gts_bg = np.load(f'./bg_{args.dataset_name}_{args.model_name}_bleurt_score.npy')
if args.use_rouge:
gts = np.load(f'./ml_{args.dataset_name}_{args.model_name}_rouge_score.npy')
gts_bg = np.load(f'./bg_{args.dataset_name}_{args.model_name}_rouge_score.npy')
else:
gts = np.load(f'./ml_{args.dataset_name}_{args.model_name}_bleurt_score.npy')
gts_bg = np.load(f'./bg_{args.dataset_name}_{args.model_name}_bleurt_score.npy')
thres = args.thres_gt
gt_label = np.asarray(gts> thres, dtype=np.int32)
gt_label_bg = np.asarray(gts_bg > thres, dtype=np.int32)
if args.dataset_name == 'tydiqa':
length = len(used_indices)
else:
length = len(dataset)
permuted_index = np.random.permutation(length)
wild_q_indices = permuted_index[:int(args.wild_ratio * length)]
# exclude validation samples.
wild_q_indices1 = wild_q_indices[:len(wild_q_indices) - 100]
wild_q_indices2 = wild_q_indices[len(wild_q_indices) - 100:]
gt_label_test = []
gt_label_wild = []
gt_label_val = []
for i in range(length):
if i not in wild_q_indices:
gt_label_test.extend(gt_label[i: i+1])
elif i in wild_q_indices1:
gt_label_wild.extend(gt_label[i: i+1])
if args.dataset_name == 'tydiqa':
length = len(used_indices)
else:
gt_label_val.extend(gt_label[i: i+1])
gt_label_test = np.asarray(gt_label_test)
gt_label_wild = np.asarray(gt_label_wild)
gt_label_val = np.asarray(gt_label_val)
length = len(dataset)
permuted_index = np.random.permutation(length)
wild_q_indices = permuted_index[:int(args.wild_ratio * length)]
# exclude validation samples.
wild_q_indices1 = wild_q_indices[:len(wild_q_indices) - 100]
wild_q_indices2 = wild_q_indices[len(wild_q_indices) - 100:]
gt_label_test = []
gt_label_wild = []
gt_label_val = []
for i in range(length):
if i not in wild_q_indices:
gt_label_test.extend(gt_label[i: i+1])
elif i in wild_q_indices1:
gt_label_wild.extend(gt_label[i: i+1])
else:
gt_label_val.extend(gt_label[i: i+1])
gt_label_test = np.asarray(gt_label_test)
gt_label_wild = np.asarray(gt_label_wild)
gt_label_val = np.asarray(gt_label_val)
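The labeling and splitting above reduce to: threshold a per-answer similarity score into a binary truthfulness label, then route each question to the unlabeled wild set, a 100-question validation set, or the test set. A self-contained sketch under those assumptions (the scores are synthetic stand-ins for the BLEURT/ROUGE files loaded above):

import numpy as np

rng = np.random.default_rng(0)
scores = rng.random(1000)                    # stand-in for BLEURT/ROUGE scores
gt_label = (scores > 0.5).astype(np.int32)   # thres_gt = 0.5

length = len(gt_label)
permuted = rng.permutation(length)
wild_q_indices = permuted[:int(0.75 * length)]      # wild_ratio = 0.75
wild_train = set(wild_q_indices[:-100].tolist())    # unlabeled wild questions
wild_val = set(wild_q_indices[-100:].tolist())      # 100 held-out validation questions

gt_label_test = gt_label[[i for i in range(length) if i not in wild_train and i not in wild_val]]
gt_label_wild = gt_label[[i for i in range(length) if i in wild_train]]
gt_label_val = gt_label[[i for i in range(length) if i in wild_val]]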
def svd_embed_score(embed_generated_wild, gt_label,embed_generated_h,embed_generated_t, begin_k, k_span, mean=1, svd=10, epsilon=1e-20):
def svd_embed_score(embed_generated_wild, gt_label,embed_generated_h,embed_generated_t, begin_k, k_span, mean=1, svd=1, weight=0):
embed_generated = embed_generated_wild
# embed_hallucination= embed_generated_h
best_auroc_over_k = 0
@@ -368,75 +351,37 @@ def main():
mean_recorded = None
# best_projection = None
for layer in range(len(embed_generated_wild[0])):
# print(len(embed_generated_wild[0]))
if mean:
mean_recorded = embed_generated[:, layer, :].mean(0)
centered = embed_generated[:, layer, :] - mean_recorded
mean_h=embed_generated_h[:, layer, :].mean(0)
centered_h=embed_generated_h[:, layer, :]-mean_h
mean_t=embed_generated_t[:, layer, :].mean(0)
centered_t=embed_generated_t[:, layer, :]-mean_t
else:
centered = embed_generated[:, layer, :]
mean_h=embed_generated_h[:, layer, :].mean(0)
centered_h=embed_generated_h[:, layer, :]-mean_h
mean_t=embed_generated_t[:, layer, :].mean(0)
centered_t=embed_generated_t[:, layer, :].mean(0)-mean_t
# if not svd:
# assert "Not implemented!"
# else:
centered=torch.from_numpy(centered).cuda()
centered_h=torch.from_numpy(centered_h).cuda()
centered_t=torch.from_numpy(centered_t).cuda()
_, sin_value, V_p = torch.linalg.svd(centered, full_matrices=False)
sin_value_squared = torch.diag(sin_value[:k]) ** 2
V_p = V_p[:k, :]
C=(1 / centered.shape[0])* V_p.T @ sin_value_squared @ V_p
_, sin_value_h, V_p_h = torch.linalg.svd(centered_h, full_matrices=False)
sin_value_h_squared = torch.diag(sin_value_h[:k]) ** 2
V_p_h = V_p_h[:k, :]
C_h=(1 / centered_h.shape[0])* V_p_h.T @ sin_value_h_squared @ V_p_h
# print(centered_t.shape)
_, sin_value_t, V_p_t = torch.linalg.svd(centered_t, full_matrices=False)
sin_value_t_squared = torch.diag(sin_value_t[:k]) ** 2
V_p_t = V_p_t[:k, :]
C_t=(1 / centered_t.shape[0])* V_p_t.T @ sin_value_t_squared @ V_p_t
inv_C_t= torch.linalg.pinv(C_t) + torch.eye(C_t.shape[0], dtype=int).cuda() * epsilon
inv_C_h= torch.linalg.pinv(C_h) + torch.eye(C_h.shape[0], dtype=int).cuda() * epsilon
test_t=torch.from_numpy(embed_generated[:, layer, :]).cuda()-centered_t
test_h=torch.from_numpy(embed_generated[:, layer, :]).cuda()-centered_h
# scores= torch.sqrt(torch.clamp(test_t @ inv_C_t @ test_t.T, min=0.0))
# - torch.sqrt(torch.clamp(test_h @ inv_C_h @ test_h.T, min=0.0))
scores= torch.sqrt(torch.clamp(centered @ inv_C_t @ centered.T, min=0.0))
- torch.sqrt(torch.clamp(centered @ inv_C_h @ centered.T, min=0.0))
# scores= mahalanobis_distance(torch.from_numpy(embed_generated[:, layer, :]).cuda(), torch.from_numpy(mean_recorded).cuda(), C_) torch.clamp(centered @ inv_C_t @ centered.T, min=0.0)
# - mahalanobis_distance(torch.from_numpy(embed_generated[:, layer, :]).cuda(), torch.from_numpy(mean_t).cuda(), C_t)
# + mahalanobis_distance(torch.from_numpy(embed_generated[:, layer, :]).cuda(), torch.from_numpy(mean_h).cuda(), C_h) centered @ inv_C_h @ centered.T
scores = torch.mean(scores, -1, keepdim=True)
scores = torch.sqrt(torch.sum(torch.square(scores), dim=1))
# projection=V_p[:k, :].T
# scores1 = torch.mean(centered @ projection, -1, keepdim=True)
# scores1 = torch.sqrt(torch.sum(torch.square(scores1), dim=1))
_, sin_value, V_p = torch.linalg.svd(torch.from_numpy(centered).cuda())
C=(1 / centered.shape[0])* V_p.T.cpu().data.numpy() @ np.diag(sin_value.cpu().data.numpy() ** 2)
_, sin_value_h, V_p_h = torch.linalg.svd(torch.from_numpy(centered_h).cuda())
C_h=(1 / centered_h.shape[0])* V_p_h.T.cpu().data.numpy() @ np.diag(sin_value_h.cpu().data.numpy() ** 2)
_, sin_value_t, V_p_t = torch.linalg.svd(torch.from_numpy(centered_t).cuda())
C_t=(1 / centered_t.shape[0])* V_p_t.T.cpu().data.numpy() @ np.diag(sin_value_t.cpu().data.numpy() ** 2)
scores= (centered*np.invert(C)*centered.T) ** 0.5 - ((embed_generated[:, layer, :]-mean_t)*np.invert(C_t)*(embed_generated[:, layer, :]-mean_t).T) ** 0.5
+ ((embed_generated[:, layer, :]-mean_h)*np.invert(C_h)*(embed_generated[:, layer, :]-mean_h).T) ** 0.5
# clf = Perceptron(tol=1e-3, random_state=0)
# not sure about whether true and false data the direction will point to,
# so we test both. similar practices are in the representation engineering paper
# https://arxiv.org/abs/2310.01405
scores=scores.data.cpu().numpy()
# scores1=scores1.data.cpu().numpy()
measures1 = get_measures(scores[gt_label == 1],
scores[gt_label == 0], plot=False)
measures2 = get_measures(-scores[gt_label == 1],
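The rewritten scorer estimates a rank-k covariance C = (1/n) V_kᵀ S_k² V_k from the SVD of centered embeddings and scores each sample by its Mahalanobis-style distance to the truthful subspace minus its distance to the hallucinated one; the sign is resolved empirically, since both directions are tested. A minimal sketch of that computation (assumed shapes: [n_samples, hidden_dim] per layer; the per-sample quadratic form is written with einsum, and the epsilon ridge sits on a float identity, unlike the dtype=int identity in the hunk):

import torch

def subspace_cov(centered, k):
    # Rank-k covariance estimate from the top-k right singular vectors.
    _, s, Vh = torch.linalg.svd(centered, full_matrices=False)
    S2 = torch.diag(s[:k] ** 2)
    Vk = Vh[:k, :]
    return (1.0 / centered.shape[0]) * Vk.T @ S2 @ Vk

def truth_minus_hal_score(centered, centered_t, centered_h, k, epsilon=1e-20):
    d = centered.shape[1]
    inv_C_t = torch.linalg.pinv(subspace_cov(centered_t, k)) + epsilon * torch.eye(d, device=centered.device)
    inv_C_h = torch.linalg.pinv(subspace_cov(centered_h, k)) + epsilon * torch.eye(d, device=centered.device)
    # Per-sample quadratic forms, clamped so the sqrt is well defined.
    d_t = torch.sqrt(torch.clamp(torch.einsum('bi,ij,bj->b', centered, inv_C_t, centered), min=0.0))
    d_h = torch.sqrt(torch.clamp(torch.einsum('bi,ij,bj->b', centered, inv_C_h, centered), min=0.0))
    return d_t - d_h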
@@ -453,7 +398,8 @@ def main():
best_auroc = measures[0]
best_result = [100 * measures[2], 100 * measures[0]]
best_layer = layer
best_scores = sign_layer * scores
# best_scores = sign_layer * scores
# best_projection = projection
best_mean = mean_recorded
best_sign = sign_layer
print('k: ', k, 'best result: ', best_result, 'layer: ', best_layer,
@@ -465,7 +411,7 @@ def main():
best_layer_over_k = best_layer
best_k = k
best_sign_over_k = best_sign
best_scores_over_k = best_scores
# best_scores_over_k = best_scores
# best_projection_over_k = best_projection
best_mean_over_k = best_mean
@@ -481,51 +427,56 @@ def main():
}
from sklearn.decomposition import PCA
feat_loc = args.feat_loc_svd
from sklearn.decomposition import PCA
feat_loc = args.feat_loc_svd
if args.most_likely:
if feat_loc == 1:
embed_generated = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy',
if args.most_likely:
if feat_loc == 3:
embed_generated = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_layer_wise.npy',
allow_pickle=True)
embed_generated_h = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_head_wise.npy',
embed_generated_t = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_layer_wise.npy',
allow_pickle=True)
embed_generated_t = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_head_wise.npy',
embed_generated_h = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_layer_wise.npy',
allow_pickle=True)
elif feat_loc == 2:
embed_generated = np.load(
elif feat_loc == 2:
embed_generated = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_mlp_wise.npy',
allow_pickle=True)
embed_generated_h = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_h_mlp_wise.npy',
allow_pickle=True)
embed_generated_t = np.load(f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_t_mlp_wise.npy',
allow_pickle=True)
else:
assert "Not supported!"
feat_indices_wild = []
feat_indices_eval = []
else:
embed_generated = np.load(
f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info + f'{args.model_name}_gene_embeddings_head_wise.npy',
allow_pickle=True)
feat_indices_wild = []
feat_indices_eval = []
if args.dataset_name == 'tydiqa':
length = len(used_indices)
else:
length = len(dataset)
if args.dataset_name == 'tydiqa':
length = len(used_indices)
else:
length = len(dataset)
for i in range(length):
if i in wild_q_indices1:
feat_indices_wild.extend(np.arange(i, i+1).tolist())
elif i in wild_q_indices2:
feat_indices_eval.extend(np.arange(i, i + 1).tolist())
if feat_loc == 3:
embed_generated_wild = embed_generated[feat_indices_wild][:,1:,:]
embed_generated_eval = embed_generated[feat_indices_eval][:, 1:, :]
embed_generated_hal,embed_generated_tru=embed_generated_h[feat_indices_wild][:,1:,:], embed_generated_t[feat_indices_wild][:,1:,:]
else:
embed_generated_wild = embed_generated[feat_indices_wild]
embed_generated_eval = embed_generated[feat_indices_eval]
embed_generated_hal,embed_generated_tru=embed_generated_h[feat_indices_eval], embed_generated_t[feat_indices_eval]
for i in range(length):
if i in wild_q_indices1:
feat_indices_wild.extend(np.arange(i, i+1).tolist())
elif i in wild_q_indices2:
feat_indices_eval.extend(np.arange(i, i + 1).tolist())
if feat_loc == 3:
# print(embed_generated.shape)
# print(embed_generated_h.shape)
# print(embed_generated_t.shape)
embed_generated_wild = embed_generated[feat_indices_wild][:,1:,:]
embed_generated_eval = embed_generated[feat_indices_eval][:, 1:, :]
embed_generated_hal,embed_generated_tru=embed_generated_h[feat_indices_wild][:,1:,:], embed_generated_t[feat_indices_wild][:,1:,:]
else:
embed_generated_wild = embed_generated[feat_indices_wild]
embed_generated_eval = embed_generated[feat_indices_eval]
# print(embed_generated.shape)
# print(embed_generated_h.shape)
# print(embed_generated_t.shape)
# print(feat_indices_wild)
embed_generated_hal,embed_generated_tru=embed_generated_h[feat_indices_wild], embed_generated_t[feat_indices_wild]
@@ -535,7 +486,7 @@ def main():
# 1, 11, mean=0, svd=0, weight=args.weighted_svd)
# get the best hyper-parameters on validation set
returned_results = svd_embed_score(embed_generated_eval, gt_label_val, embed_generated_hal,embed_generated_tru,
1, 15, mean=1, svd=10)
1, 11, mean=1, svd=1, weight=args.weighted_svd)
pca_model = PCA(n_components=returned_results['k'], whiten=False).fit(embed_generated_wild[:,returned_results['best_layer'],:])
projection = pca_model.components_.T
@@ -563,10 +514,7 @@ def main():
assert test_scores.shape[1] == 1
test_scores = np.sqrt(np.sum(np.square(test_scores), axis=1))
mdic = {"gt_1": test_scores[gt_label_test == 1], "gt_0": test_scores[gt_label_test == 0], "scale_gt_1":returned_results['best_sign'] * test_scores[gt_label_test == 1],
"scale_gt_0": returned_results['best_sign'] *test_scores[gt_label_test == 0]
}
savemat("tqa_score.mat", mdic)
measures = get_measures(returned_results['best_sign'] * test_scores[gt_label_test == 1],
returned_results['best_sign'] *test_scores[gt_label_test == 0], plot=False)
print_measures(measures[0], measures[1], measures[2], 'direct-projection')
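Downstream, the validated k and layer drive a PCA fit on the wild embeddings, and the test score is the norm of the mean projection, matching the assert test_scores.shape[1] == 1 above. A sketch of that scoring (the function name is illustrative; get_measures/print_measures are repo-internal helpers):

import numpy as np
from sklearn.decomposition import PCA

def pca_projection_score(wild_feats, test_feats, k):
    # Fit the projection on the unlabeled wild data only.
    pca = PCA(n_components=k, whiten=False).fit(wild_feats)
    projection = pca.components_.T                              # [hidden_dim, k]
    centered = test_feats - wild_feats.mean(0)
    s = np.mean(centered @ projection, axis=-1, keepdims=True)  # [n, 1]
    return np.sqrt(np.sum(np.square(s), axis=1))                # [n]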
@@ -612,7 +560,6 @@ def main():
clf.eval()
torch.save(clf.state_dict(), f'save_for_eval/{args.dataset_name}/{args.model_name}_hal_det/' + info+ '_{layer}_model_weights.pth')
output = clf(torch.from_numpy(
embed_generated_test[:, layer, :]).cuda())
pca_wild_score_binary_cls = torch.sigmoid(output)
@@ -624,7 +571,7 @@ def main():
breakpoint()
measures = get_measures(pca_wild_score_binary_cls[gt_label_test == 1],
pca_wild_score_binary_cls[gt_label_test == 0], plot=False)
# print_measures(measures[0], measures[1], measures[2], 'class-acc')
if measures[0] > best_auroc:
best_auroc = measures[0]
best_result = [100 * measures[0]]
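This branch instead trains a small classifier on the layer features and feeds its sigmoid output into the same AUROC comparison. A compact sketch with a linear probe (the repo's clf may be a different architecture; the function name is illustrative):

import torch
import torch.nn as nn
from sklearn.metrics import roc_auc_score

def probe_auroc(train_x, train_y, test_x, test_y, epochs=50):
    clf = nn.Linear(train_x.shape[1], 1)
    opt = torch.optim.Adam(clf.parameters(), lr=1e-3)
    x, y = torch.from_numpy(train_x).float(), torch.from_numpy(train_y).float()
    for _ in range(epochs):
        opt.zero_grad()
        loss = nn.functional.binary_cross_entropy_with_logits(clf(x).squeeze(-1), y)
        loss.backward()
        opt.step()
    clf.eval()
    with torch.no_grad():
        scores = torch.sigmoid(clf(torch.from_numpy(test_x).float())).squeeze(-1).numpy()
    return roc_auc_score(test_y, scores)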

View File

@@ -62,10 +62,10 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='step-1-8k')
parser.add_argument('--dataset_name', type=str, default='triviaqa')
parser.add_argument('--dataset_name', type=str, default='tqa')
parser.add_argument('--num_gene', type=int, default=1)
parser.add_argument('--use_api', type=bool, default=True)
parser.add_argument('--most_likely', type=bool, default=True)
parser.add_argument('--most_likely', type=bool, default=False)
parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data')
parser.add_argument("--instruction", type=str, default='/home/liwenyun/code/haloscope/generation/qa/qa_one-turn_instruction.txt', help='local directory of instruction file.')
args = parser.parse_args()
@@ -175,7 +175,6 @@ def main():
raise ValueError("Invalid dataset name")
f = open(args.instruction, 'r', encoding="utf-8")
instruction = f.read()
error_output='No output'
if args.use_api:
begin_index = 0
@@ -211,9 +210,8 @@ def main():
hallucination_prompt=get_hal_prompt(dataset[int(used_indices[i])]['context'],question,instruction)
truth_prompt=get_truth_prompt(dataset[int(used_indices[i])]['context'],question)
elif args.dataset_name == 'triviaqa':
prompt = get_qa_prompt("None",dataset[i]['question'])
question= dataset[i]['question']
hallucination_prompt=get_hal_prompt("None",dataset[i]['question'],instruction)
prompt = get_qa_prompt("None",dataset[i]['prompt'])
hallucination_prompt=get_hal_prompt("None",dataset[i]['prompt'],instruction)
truth_prompt=get_truth_prompt("None",question)
elif args.dataset_name == 'coqa':
prompt = get_qa_prompt("None",dataset[i]['prompt'])
@@ -225,46 +223,31 @@ def main():
for gen_iter in range(args.num_gene):
if args.most_likely:
try:
response = client.chat.completions.create(
response = client.chat.completions.create(
model = args.model_name,
messages = prompt,
max_tokens=256,
top_p=1,
temperature = 1,
)
decoded=response.choices[0].message.content
except openai.APIStatusError as e:
print("error occured!"+str(gen_iter)+"responce {e}")
decoded = error_output
try:
hallucination_response = client.chat.completions.create(
hallucination_response = client.chat.completions.create(
model = args.model_name,
messages = hallucination_prompt,
max_tokens=256,
top_p=1,
temperature = 1,
)
hallucination_decoded=hallucination_response.choices[0].message.content
except openai.APIStatusError as e:
print("error occured!"+str(gen_iter)+"hallucination_responce {e}")
hallucination_decoded = error_output
if args.dataset_name == 'tydiqa' or args.dataset_name == 'tydiqa':
try:
truth_response=client.chat.completions.create(
truth_response=client.chat.completions.create(
model = args.model_name,
messages = truth_prompt,
max_tokens=256,
top_p=1,
temperature=1
)
truth_decoded=truth_response.choices[0].message.content
except openai.APIStatusError as e:
print("error occured!"+str(gen_iter)+"truth_responce {e}")
truth_decoded =error_output
truth_decoded=truth_response.choices[0].message.content
decoded=response.choices[0].message.content
hallucination_decoded=hallucination_response.choices[0].message.content
else:
response = client.chat.completions.create(
model = args.model_name,
@@ -292,10 +275,10 @@ def main():
top_p=0.5,
temperature = 0.5,
)
truth_decoded=truth_response.choices[0].message.content
truth_decoded=truth_response.choices[0].message.content
decoded=response.choices[0].message.content
hallucination_decoded=hallucination_response.choices[0].message.content
time.sleep(40)
time.sleep(20)
# decoded = tokenizer.decode(generated[0, prompt.shape[-1]:],
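Every request above repeats the same guarded call: on an API error, log it and fall back to the error_output sentinel rather than aborting the batch. The pattern factored into a helper, assuming the openai>=1.x client used in this file (the function name is illustrative):

import openai

def safe_chat(client, model_name, messages, error_output='No output'):
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            max_tokens=256,
            top_p=1,
            temperature=1,
        )
        return response.choices[0].message.content
    except openai.APIStatusError as e:
        # Keep going with a sentinel answer instead of crashing the run.
        print(f"error occurred! response: {e}")
        return error_output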

View File

@@ -50,14 +50,14 @@ def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='llama2_chat_7B')
parser.add_argument('--model_name', type=str, default='moonshot-v1-8k')
parser.add_argument('--model_name', type=str, default='step-1-8k')
parser.add_argument('--dataset_name', type=str, default='tqa')
parser.add_argument('--num_gene', type=int, default=1)
parser.add_argument('--use_api', type=bool, default=False)
parser.add_argument('--most_likely', type=bool, default=False)
parser.add_argument("--model_dir", type=str, default=None, help='local directory with model data')
parser.add_argument("--instruction", type=str, default=None, help='local directory of instruction file.')
parser.add_argument('--use_rouge', type=bool, default=True)
parser.add_argument('--use_rouge', type=bool, default=False)
parser.add_argument('--thres_gt', type=float, default=0.5)
# parser.add_argument('--model_name', type=str, default='llama2_chat_7B')

Binary file not shown.

View File

@@ -954,8 +954,4 @@ You are an AI assistant. You'll provide helpful, harmless, and detailed response
"\n#Question#: " + question +
# "\n#Right Answer#: " + answer +
"\n#Answer#: "}
]
def mahalanobis_distance(x, mean, cov):
cov_inv = torch.inverse(cov)
delta = x - mean
return torch.sqrt(torch.einsum('bi,ij,bj->b', delta, cov_inv, delta))
]
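For reference, mahalanobis_distance computes per-row distances through a batched quadratic form. A small usage sketch with synthetic data and a ridge so the covariance is invertible:

import torch

x = torch.randn(5, 3)                        # 5 samples, 3 features
mean = x.mean(0)
cov = torch.cov(x.T) + 1e-6 * torch.eye(3)   # [3, 3], regularized
delta = x - mean
cov_inv = torch.inverse(cov)
dists = torch.sqrt(torch.einsum('bi,ij,bj->b', delta, cov_inv, delta))  # shape [5]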