@inproceedings{b0c62567289e4cd68acfd2bcb2fe3247,
  title     = {Fast Local Attack: Generating Local Adversarial Examples for Object Detectors},
  abstract  = {The deep neural network is vulnerable to adversarial examples. Adding imperceptible adversarial perturbations to images is enough to make them fail. Most existing research focuses on attacking image classifiers or anchor-based object detectors, but they generate globally perturbation on the whole image, which is unnecessary. In our work, we leverage higher-level semantic information to generate high aggressive local perturbations for anchor-free object detectors. As a result, it is less computationally intensive and achieves a higher black-box attack as well as transferring attack performance. The adversarial examples generated by our method are not only capable of attacking anchor-free object detectors, but also able to be transferred to attack anchor-based object detector.},
  keywords  = {adversarial attack, fast local attack, object detection},
  author    = {Liao, Quanyu and Wang, Xin and Kong, Bin and Lyu, Siwei and Yin, Youbing and Song, Qi and Wu, Xi},
  note      = {Publisher Copyright: {\textcopyright} 2020 IEEE.; 2020 International Joint Conference on Neural Networks, IJCNN 2020 ; Conference date: 19-07-2020 Through 24-07-2020},
  year      = {2020},
  month     = jul,
  doi       = {10.1109/IJCNN48605.2020.9206811},
  language  = {English},
  series    = {Proceedings of the International Joint Conference on Neural Networks},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  booktitle = {2020 International Joint Conference on Neural Networks, {IJCNN} 2020 - Proceedings},
  internal-note = {NOTE(review): no pages/article-number field in the exported record -- add if known},
}