@inproceedings{c895a96cdd2d433aa6306cde49cdd4f3,
title = "Multi-cue visual tracking using robust feature-level fusion based on joint sparse representation",
abstract = "The use of multiple features for tracking has proved to be an effective approach because the limitations of each feature can be compensated for by the others. Since different types of variation, such as changes in illumination, occlusion and pose, may occur in a video sequence, especially a long one, dynamically selecting the appropriate features is one of the key problems in this approach. To address this issue in multi-cue visual tracking, this paper proposes a new joint sparse representation model for robust feature-level fusion. The proposed method exploits the advantages of sparse representation to dynamically remove unreliable features from the fusion process. As a result, robust tracking performance is obtained. Experimental results on publicly available videos show that the proposed method outperforms both existing sparse-representation-based and fusion-based trackers.",
author = "Lan, Xiangyuan and Ma, {Andy Jinhua} and Yuen, {Pong Chi}",
year = "2014",
month = sep,
day = "24",
doi = "10.1109/CVPR.2014.156",
language = "English",
series = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
publisher = "IEEE Computer Society",
pages = "1194--1201",
booktitle = "Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
address = "United States",
note = "27th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2014; Conference date: 23-06-2014 through 28-06-2014",
}
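
For orientation, below is a minimal, hypothetical sketch of the generic joint sparse coding idea named in the abstract: each cue k codes the tracking candidate's observation y_k over its own template dictionary D_k, and an l_{2,1} penalty couples the cues so that they select the same templates; cues with large reconstruction residuals can then be treated as unreliable and dropped from the fusion. The function name, parameters, and the proximal-gradient solver are illustrative assumptions, not the paper's actual formulation or code.

import numpy as np

def joint_sparse_fusion(dicts, obs, lam=0.1, n_iter=200):
    """Hypothetical sketch: joint sparse coding of one candidate over K cues.

    dicts : list of K template dictionaries D_k, each of shape (d_k, n)
    obs   : list of K observation vectors y_k, each of shape (d_k,)
    Solves  min_W  sum_k ||y_k - D_k w_k||^2 + lam * ||W||_{2,1}
    by proximal gradient descent, where column k of W = [w_1, ..., w_K]
    codes cue k and the l_{2,1} norm makes the cues share active templates.
    """
    K = len(dicts)
    n = dicts[0].shape[1]
    W = np.zeros((n, K))
    # Step size from the largest Lipschitz constant 2 * ||D_k||_2^2 over the cues.
    step = 1.0 / max(2.0 * np.linalg.norm(D, 2) ** 2 for D in dicts)
    for _ in range(n_iter):
        # Gradient step on the smooth data-fitting term, one column per cue.
        grad = np.column_stack(
            [2.0 * dicts[k].T.dot(dicts[k].dot(W[:, k]) - obs[k]) for k in range(K)]
        )
        V = W - step * grad
        # Row-wise group soft-thresholding: proximal operator of step * lam * ||.||_{2,1}.
        row_norms = np.linalg.norm(V, axis=1, keepdims=True)
        W = np.maximum(1.0 - step * lam / np.maximum(row_norms, 1e-12), 0.0) * V
    # Per-cue reconstruction residuals; a large residual flags an unreliable cue
    # that a fusion-based tracker could down-weight or remove.
    residuals = [float(np.linalg.norm(obs[k] - dicts[k].dot(W[:, k]))) for k in range(K)]
    return W, residuals

In such a scheme, the joint code W would score each candidate while the per-cue residuals would drive the reliability weighting of the fused features.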