@inproceedings{5cc4edfc3c3f4266ac92e73f17cd2976,
title = "MG-WFBP: Efficient Data Communication for Distributed Synchronous SGD Algorithms",
abstract = "Distributed synchronous stochastic gradient descent has been widely used to train deep neural networks on computer clusters. With the increase of computational power, network communications have become one limiting factor on the system scalability. In this paper, we observe that many deep neural networks have a large number of layers with only a small amount of data to be communicated. Based on the fact that merging some short communication tasks into a single one may reduce the overall communication time, we formulate an optimization problem to minimize the training iteration time. We develop an optimal solution named merged-gradient wait-free backpropagation (MG-WFBP) and implement it in our open-source deep learning platform B-Caffe. Our experimental results on an 8-node GPU cluster with 10GbE interconnect and trace-based simulation results on a 64-node cluster both show that the MG-WFBP algorithm can achieve much better scaling efficiency than existing methods WFBP and SyncEASGD.",
keywords = "Deep Learning, Distributed Stochastic Gradient Descent, GPU, Gradient Communication, Merged-gradient",
author = "Shaohuai Shi and Xiaowen CHU and Bo Li",
note = "Funding Information: The research was supported in part by Hong Kong RGC GRF grants under the contracts HKBU 12200418, HKUST 16206417 and 16207818, a RGC CRF grant under the contract C7036-15G.; 2019 IEEE Conference on Computer Communications, INFOCOM 2019 ; Conference date: 29-04-2019 Through 02-05-2019",
year = "2019",
month = apr,
doi = "10.1109/INFOCOM.2019.8737367",
language = "English",
series = "Proceedings - IEEE INFOCOM",
publisher = "IEEE",
pages = "172--180",
booktitle = "INFOCOM 2019 - IEEE Conference on Computer Communications",
address = "United States",
}