@inproceedings{a19f21c955ab4112adc73f61b831db22,
title = "Audio-visual speaker recognition via multi-modal correlated neural networks",
abstract = "Multi-modal speaker recognition has received considerable attention in recent years due to the growing security demands of real applications. In this paper, we present an efficient audio-visual speaker recognition method that fuses face and audio via multi-modal correlated neural networks. Within our proposed approach, the facial features learned by convolutional neural networks are made compatible with the audio features at a high level, and the heterogeneous multi-modal features can be learned automatically. Accordingly, we propose correlated neural networks to fuse the face and audio modalities at different levels such that the speaker identity can be well identified. The experimental results show that our proposed multi-modal speaker recognition approach outperforms the single-modality baselines, and that feature-level fusion yields comparable and even better results than the decision-level case.",
author = "Jiajia Geng and Xin Liu and Yiu-Ming Cheung",
note = "2016 IEEE/WIC/ACM International Conference on Web Intelligence Workshops, WIW 2016; Conference date: 13-10-2016 through 16-10-2016",
year = "2017",
month = jan,
day = "11",
doi = "10.1109/WIW.2016.47",
language = "English",
series = "Proceedings - 2016 IEEE/WIC/ACM International Conference on Web Intelligence Workshops, WIW 2016",
publisher = "IEEE",
pages = "123--128",
booktitle = "Proceedings - 2016 IEEE/WIC/ACM International Conference on Web Intelligence Workshops, WIW 2016",
address = "United States",
}