@inproceedings{cc9e60249885497085e998ca8fa90c25,
title = "Secure Your Model: An Effective Key Prompt Protection Mechanism for Large Language Models",
abstract = "Large language models (LLMs) have notably revolutionized many domains within natural language processing due to their exceptional performance. Their security has become increasingly vital. This study is centered on protecting LLMs against unauthorized access and potential theft. We propose a simple yet effective protective measure wherein a unique key prompt is embedded within the LLM. This mechanism enables the model to respond only when presented with the correct key prompt; otherwise, LLMs will refuse to react to any input instructions. This key prompt protection offers a robust solution to prevent the unauthorized use of LLMs, as the model becomes unusable without the correct key. We evaluated the proposed protection on multiple LLMs and NLP tasks. Results demonstrate that our method can successfully protect the LLM without significantly impacting the model's original function. Moreover, we demonstrate potential attacks that attempt to bypass the protection mechanism will adversely affect the model's performance, further emphasizing the effectiveness of the proposed protection method.",
author = "Ruixiang Tang and Chuang, {Yu Neng} and Xuanting Cai and Mengnan Du and Xia Hu",
note = "Publisher Copyright: {\textcopyright} 2024 Association for Computational Linguistics.; 2024 Findings of the Association for Computational Linguistics: NAACL 2024 ; Conference date: 16-06-2024 Through 21-06-2024",
year = "2024",
language = "English (US)",
series = "Findings of the Association for Computational Linguistics: NAACL 2024 - Findings",
publisher = "Association for Computational Linguistics (ACL)",
pages = "4061--4073",
editor = "Kevin Duh and Helena Gomez and Steven Bethard",
booktitle = "Findings of the Association for Computational Linguistics",
address = "United States",
}