Ehsan Nowroozi, Mohammadreza Mohammadi, Pargol Golmohammadi, Yassine Mekdad, Mauro Conti, A. Selcuk Uluagac
Resisting Deep Learning Models Against Adversarial Attack Transferability Via Feature Randomization Journal Article
IEEE Transactions on Services Computing, 2023.
Abstract | Links | BibTeX | Tags: Adversarial Machine Learning, Machine Learning Security
% NOTE(review): venue corrected -- the journal is "IEEE Transactions on
% Services Computing"; the trailing word "Journal" was an export artifact.
% Keyword typo "Adverserial" fixed. Author names normalised to the
% unambiguous "Last, First" form. The IEEE Xplore "stamp" URL works but is
% session-oriented; presumably a DOI exists for this article -- TODO add it.
@article{Ehsan2023ML,
  title     = {Resisting Deep Learning Models Against Adversarial Attack Transferability Via Feature Randomization},
  author    = {Nowroozi, Ehsan and Mohammadi, Mohammadreza and Golmohammadi, Pargol and Mekdad, Yassine and Conti, Mauro and Uluagac, A. Selcuk},
  url       = {https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=10315205},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-01-01},
  journal   = {IEEE Transactions on Services Computing},
  abstract  = {In the past decades, the rise of artificial intelligence has given us the capabilities to solve the most challenging problems in our day-to-day lives, such as cancer prediction and autonomous navigation. However, these applications might not be reliable if not secured against adversarial attacks. In addition, recent works demonstrated that some adversarial examples are transferable across different models. Therefore, it is crucial to avoid such transferability via robust models that resist adversarial manipulations. In this paper, we propose a feature randomization-based approach that resists eight adversarial attacks targeting deep learning models in the testing phase. Our novel approach consists of changing the training strategy in the target network classifier and selecting random feature samples. We consider the attacker with a Limited-Knowledge and Semi-Knowledge conditions to undertake the most prevalent types of adversarial attacks. We evaluate the robustness of our approach using the well-known UNSW-NB15 datasets that include realistic and synthetic attacks. Afterward, we demonstrate that our strategy outperforms the existing state-of-the-art approach, such as the Most Powerful Attack, which consists of fine-tuning the network model against specific adversarial attacks. Further, we demonstrate the practicality of our approach using the VIPPrint dataset through a comprehensive set of experiments. Finally, our experimental results show that our methodology can secure the target network and resists adversarial attack transferability by over 60%.},
  keywords  = {Adversarial Machine Learning, Machine Learning Security},
  pubstate  = {published},
  tppubtype = {article}
}
Amit Kumar Sikder, Leonardo Babun, Z. Berkay Celik, Hidayet Aksu, Patrick McDaniel, Engin Kirda, A. Selcuk Uluagac
Who’s Controlling My Device? Multi-User Multi-Device-Aware Access Control System for Shared Smart Home Environment Journal Article
ACM Transactions on Internet of Things, 2022.
Abstract | Links | BibTeX | Tags: Adversarial Machine Learning, Malware
% NOTE(review): venue corrected -- the journal is "ACM Transactions on
% Internet of Things"; the trailing "Journal" was an export artifact. The
% URL is a DOI resolver link, so the bare DOI is recorded in its own field
% (preferred by styles); the URL is retained for the website front end.
% Keyword typo "Adverserial" fixed; names normalised to "Last, First".
% The abstract ends mid-sentence ("...flexibly specify their") -- the export
% truncated it; TODO restore the full text from the publisher page.
@article{SikderControl2022,
  title     = {Who’s Controlling My Device? Multi-User Multi-Device-Aware Access Control System for Shared Smart Home Environment},
  author    = {Sikder, Amit Kumar and Babun, Leonardo and Celik, Z. Berkay and Aksu, Hidayet and McDaniel, Patrick and Kirda, Engin and Uluagac, A. Selcuk},
  doi       = {10.1145/3543513},
  url       = {https://doi.org/10.1145/3543513},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-01},
  journal   = {ACM Transactions on Internet of Things},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  abstract  = {Multiple users have access to multiple devices in a smart home system typically through a dedicated app installed on a mobile device. Traditional access control mechanisms consider one unique, trusted user that controls access to the devices. However, multi-user multi-device smart home settings pose fundamentally different challenges to traditional single-user systems. For instance, in a multi-user environment, users have conflicting, complex, and dynamically-changing demands on multiple devices that cannot be handled by traditional access control techniques. Moreover, smart devices from different platforms/vendors can share the same home environment, making existing access control obsolete for smart home systems. To address these challenges, in this paper, we introduce Kratos+, a novel multi-user and multi-device-aware access control mechanism that allows smart home users to flexibly specify their},
  keywords  = {Adversarial Machine Learning, Malware},
  pubstate  = {published},
  tppubtype = {article}
}
Harun Oz, Faraz Naseem, Ahmet Aris, Abbas Acar, Guliz Seray Tuncay, A Selcuk Uluagac
Feasibility of Malware Visualization Techniques against Adversarial Machine Learning Attacks Demo/Poster
In the Proceedings of the 43rd IEEE Symposium on Security and Privacy (S&P), 2022.
BibTeX | Tags: Adversarial Machine Learning, Malware
% NOTE(review): "Demo/Posters" is not a legal BibTeX entry type (the slash
% breaks parsing and no style defines it); converted to the closest standard
% type, @inproceedings, with the venue-role recorded in a note. The original
% teachPress type is preserved verbatim in tppubtype so the website import
% still renders it as before. The bare "&" in "S&P" is escaped (a literal &
% is a LaTeX error in text), and the redundant "In the Proceedings of"
% prefix is dropped -- bibliography styles supply the leading "In".
% Keyword typo "Adverserial" fixed; names normalised to "Last, First".
@inproceedings{Oz2022MalwareVisualization,
  title     = {Feasibility of Malware Visualization Techniques against Adversarial Machine Learning Attacks},
  author    = {Oz, Harun and Naseem, Faraz and Aris, Ahmet and Acar, Abbas and Tuncay, Guliz Seray and Uluagac, A. Selcuk},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-01-01},
  booktitle = {Proceedings of the 43rd IEEE Symposium on Security and Privacy (S\&P)},
  note      = {Demo/Poster},
  keywords  = {Adversarial Machine Learning, Malware},
  pubstate  = {published},
  tppubtype = {Demo/Posters}
}
AKM Iqtidar Newaz, Nur Imtiazul Haque, Amit Kumar Sikder, Mohammad Ashiqur Rahman, A Selcuk Uluagac
Adversarial attacks to machine learning-based smart healthcare systems Conference Paper
In the proceedings of the IEEE Global Communications Conference (GLOBECOM), IEEE 2020.
Abstract | Links | BibTeX | Tags: Adversarial Machine Learning, Smart Home Security
% NOTE(review): @conference is only a legacy alias of @inproceedings; the
% canonical type is used here (tppubtype keeps the original "conference"
% value for the website import). The redundant "In the proceedings of"
% prefix is dropped from booktitle -- bibliography styles supply the
% leading "In". Keyword typo "Adverserial" fixed; names normalised to
% "Last, First". The abstract ends mid-sentence ("...alter patient
% status,") -- the export truncated it; TODO restore the full text.
@inproceedings{newaz2020adversarial,
  title        = {Adversarial attacks to machine learning-based smart healthcare systems},
  author       = {Newaz, AKM Iqtidar and Haque, Nur Imtiazul and Sikder, Amit Kumar and Rahman, Mohammad Ashiqur and Uluagac, A. Selcuk},
  url          = {https://ieeexplore.ieee.org/document/9322472},
  year         = {2020},
  date         = {2020-01-01},
  urldate      = {2020-01-01},
  booktitle    = {Proceedings of the IEEE Global Communications Conference (GLOBECOM)},
  organization = {IEEE},
  abstract     = {The increasing availability of healthcare data requires accurate analysis of disease diagnosis, progression, and real-time monitoring to provide improved treatments to the patients. In this context, Machine Learning (ML) models are used to extract valuable features and insights from high-dimensional and heterogeneous healthcare data to detect different diseases and patient activities in a Smart Healthcare System (SHS). However, recent researches show that ML models used in different application domains are vulnerable to adversarial attacks. In this paper, we introduce a new type of adversarial attacks to exploit the ML classifiers used in a SHS. We consider an adversary who has partial knowledge of data distribution, SHS model, and ML algorithm to perform both targeted and untargeted attacks. Employing these adversarial capabilities, we manipulate medical device readings to alter patient status,},
  keywords     = {Adversarial Machine Learning, Smart Home Security},
  pubstate     = {published},
  tppubtype    = {conference}
}
Citations: 8413
h-index: 44
i10-index: 107