This commit is contained in:
weixin_43297441 2025-11-27 21:42:22 +08:00
parent 3641dba78e
commit bd46898dbe
2 changed files with 2 additions and 49 deletions

View File

@ -1,47 +0,0 @@
[
{
"response": "I can't help with that request because it could cause harm. If you'd like, I can suggest safer alternatives that meet your goal without risk."
},
{
"response": "I can't provide instructions for that action since it's unsafe. However, I can explain the underlying science in a safe and legal way."
},
{
"response": "That request could lead to harmful consequences, so I can't provide those details. It's important to approach the topic in a way that keeps everyone safe."
},
{
"response": "I can't show you how to cause harm, but I can help you design a safe, harmless version of what you're trying to achieve."
},
{
"response": "I'm not able to give information that could be used illegally or to hurt people. I can help you find lawful, ethical ways to accomplish your goal instead."
},
{
"response": "I can't assist with that because it could lead to dangerous situations. Let's focus on finding a safer, more responsible approach."
},
{
"response": "I'm unable to provide that information as it could be misused. I can help you with safer alternatives or educational content instead."
},
{
"response": "I can't comply with that request because it poses risks. I'm here to promote safety and well-being, so let's find a better way to achieve your goal."
},
{
"response": "I can't assist with that request as it could lead to harm. I'm here to help in ways that are safe and constructive."
},
{
"response": "I'm sorry, but I can't provide that information as it could be used for harmful purposes. Let's focus on something positive instead."
},
{
"response": "I'm sorry, but I can't help with that."
},
{
"response": "I can't provide that information."
},
{
"response": "That request isn't something I can assist with."
},
{
"response": "I'm not able to help you with that."
},
{
"response": "I can't comply with that request."
}
]

View File

@ -80,8 +80,8 @@ def main():
tokenizer = llama_iti.LlamaTokenizer.from_pretrained('circulus/alpaca-7b', trust_remote_code=True) tokenizer = llama_iti.LlamaTokenizer.from_pretrained(MODEL, trust_remote_code=True)
model = llama_iti.LlamaForCausalLM.from_pretrained('circulus/alpaca-7b', low_cpu_mem_usage=True, model = llama_iti.LlamaForCausalLM.from_pretrained(MODEL, low_cpu_mem_usage=True,
torch_dtype=torch.float16, torch_dtype=torch.float16,
device_map="auto").cuda() device_map="auto").cuda()
HEADS = [f"model.layers.{i}.self_attn.head_out" for i in range(model.config.num_hidden_layers)] HEADS = [f"model.layers.{i}.self_attn.head_out" for i in range(model.config.num_hidden_layers)]