@inproceedings{67a6e4f3a553420b9cfed9c0b6bbbbe8,
title = "Investigating How Data Poising Attacks Can Impact An EEG-Based Federated Learning Model",
abstract = "Detecting potential security threats from individuals within an organization can be achieved using an Electroencephalogram (EEG), which captures the brain's electrical activity. The concept is based on the premise that certain brainwave patterns might be associated with malicious intentions or deceptive behaviors. Recent research on insider threat detection has utilized traditional machine learning classifiers to recognize patterns in brainwave data that correlate with malicious intent. However, these methods pose privacy and data security concerns because they require access to all user data. A recently introduced framework, Federated Learning (FL), offers a solution to this problem. FL aims to develop a global model classifier without the need to access users' local data, thus safeguarding their privacy and sensitive information. Thus, we developed an FL-based insider threat detection model trained on a dataset that contains the EEG signals of 17 participants captured from five electrodes across five power bands using the Emotiv Insight. The model's accuracy within our framework attained a rate of (94.71%) for MLP. However, this method faces potential security threats and attacks, as clients could act maliciously, or external malicious actors might disrupt the network. Therefore, we additionally explore the data poisoning attacks, emphasizing label-flipping scenarios within our federated learning system for EEG-based insider threat detection and illustrating how factors such as the number of poisoned clients and the percentage of poisoning affect an FL-based system. Based on our findings, a higher number of poisoned clients is much more damaging to FL-based systems and should thus be a focal point of consideration in the security design process of these systems.",
keywords = "artificial intelligence, data poisoning, deep learning, EEG signals, federated learning, insider threat, label flipping, logistic regression, machine learning, multilayer perceptron, single feed-forward network",
author = "Shamma Alshebli and Muna Alshehhi and Yeun, {Chan Yeob}",
note = "Publisher Copyright: {\textcopyright} 2024 IEEE.; 2nd International Conference on Cyber Resilience, ICCR 2024 ; Conference date: 26-02-2024 Through 28-02-2024",
year = "2024",
doi = "10.1109/ICCR61006.2024.10532875",
language = "British English",
series = "2nd International Conference on Cyber Resilience, ICCR 2024",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
booktitle = "2nd International Conference on Cyber Resilience, ICCR 2024",
address = "United States",
}