NOTE(review): `address` below is a country ("United States"), but the field should
hold the publisher's city — auto-export junk; confirm the city and replace.
Editor "Benkhelifa Elhadj" was converted to comma form with surname Benkhelifa;
other name spellings (Yasser/Yaser Jararweh, Boubshir) kept exactly as exported.
@inproceedings{7e45ba02f155499592b96935108a11af,
  author    = {Furth, Nicholas and Khreishah, Abdallah and Liu, Guanxiong and Phan, {Nhat Hai} and Jararweh, Yasser},
  editor    = {Boubshir, Larbi and Daachi, Boubaker and Mokrane, Abdellah and Jararweh, Yaser and Benkhelifa, Elhadj},
  title     = {{Un-Fair Trojan}: Targeted Backdoor Attacks Against Model Fairness},
  booktitle = {2022 9th International Conference on Software Defined Systems, SDS 2022},
  series    = {2022 9th International Conference on Software Defined Systems, SDS 2022},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  address   = {United States},
  year      = {2022},
  doi       = {10.1109/SDS57574.2022.10062890},
  language  = {English (US)},
  keywords  = {Back-door Attacks, Fair Machine Learning, Federated Learning, Machine Learning},
  abstract  = {Machine learning models have proven to have the ability to make accurate predictions on complex data tasks such as image and graph data. However, they are vulnerable to various backdoor and data poisoning attacks which adversely affect model behavior. These attacks become more prevalent and complex in federated learning, where multiple local models contribute to a single global model communicating using only local gradients. Additionally, these models tend to make unfair predictions for certain protected features. Previously published works revolve around solving these issues both individually and jointly. However, there has been little study on how the adversary can launch an attack that can control model fairness. Demonstrated in this work, a flexible attack, which we call Un-Fair Trojan, that targets model fairness while remaining stealthy can have devastating effects against machine learning models, increasing their demographic parity by up to 30\%, without causing a significant decrease in the model accuracy.},
  note      = {Publisher Copyright: {\textcopyright} 2022 IEEE.; 9th International Conference on Software Defined Systems, SDS 2022 ; Conference date: 12-12-2022 Through 15-12-2022},
}