ExtraPush for Convex Smooth Decentralized Optimization Over Directed Networks
J. Comp. Math., 35 (2017), pp. 383-396.
Published online: 2017-08
[An open-access article; the PDF is free to any online user.]
- BibTeX
- RIS
- TXT
@Article{JCM-35-383,
author = {Zeng, Jinshan and Yin, Wotao},
title = {ExtraPush for Convex Smooth Decentralized Optimization Over Directed Networks},
journal = {Journal of Computational Mathematics},
year = {2017},
volume = {35},
number = {4},
pages = {383--396},
abstract = {In this note, we extend the algorithms Extra [13] and subgradient-push [10] to a new algorithm, ExtraPush, for consensus optimization with convex differentiable objective functions over a directed network. When the stationary distribution of the network can be computed in advance, we propose a simplified algorithm called Normalized ExtraPush. Just like Extra, both ExtraPush and Normalized ExtraPush can iterate with a fixed step size. But unlike Extra, they can take a column-stochastic mixing matrix, which is not necessarily doubly stochastic. Therefore, they remove the undirected-network restriction of Extra. Subgradient-push, though it also works over directed networks, is slower on the same type of problem because it must use a sequence of diminishing step sizes. We present a preliminary analysis of ExtraPush under a bounded-sequence assumption. For Normalized ExtraPush, we show that it naturally produces a bounded, linearly convergent sequence provided that the objective function is strongly convex. In our numerical experiments, ExtraPush and Normalized ExtraPush performed similarly well. They are significantly faster than subgradient-push, even when we hand-optimize the step sizes for the latter.},
issn = {1991-7139},
doi = {10.4208/jcm.1606-m2015-0452},
url = {https://global-sci.org/intro/article_detail/jcm/10022.html}
}
TY - JOUR
T1 - ExtraPush for Convex Smooth Decentralized Optimization Over Directed Networks
AU - Zeng, Jinshan
AU - Yin, Wotao
JO - Journal of Computational Mathematics
VL - 35
IS - 4
SP - 383
EP - 396
PY - 2017
DA - 2017/08
SN - 1991-7139
DO - 10.4208/jcm.1606-m2015-0452
UR - https://global-sci.org/intro/article_detail/jcm/10022.html
KW - Decentralized optimization
KW - Directed graph
KW - Consensus
KW - Non-doubly stochastic
KW - Extra
AB - In this note, we extend the algorithms Extra [13] and subgradient-push [10] to a new algorithm, ExtraPush, for consensus optimization with convex differentiable objective functions over a directed network. When the stationary distribution of the network can be computed in advance, we propose a simplified algorithm called Normalized ExtraPush. Just like Extra, both ExtraPush and Normalized ExtraPush can iterate with a fixed step size. But unlike Extra, they can take a column-stochastic mixing matrix, which is not necessarily doubly stochastic. Therefore, they remove the undirected-network restriction of Extra. Subgradient-push, though it also works over directed networks, is slower on the same type of problem because it must use a sequence of diminishing step sizes. We present a preliminary analysis of ExtraPush under a bounded-sequence assumption. For Normalized ExtraPush, we show that it naturally produces a bounded, linearly convergent sequence provided that the objective function is strongly convex. In our numerical experiments, ExtraPush and Normalized ExtraPush performed similarly well. They are significantly faster than subgradient-push, even when we hand-optimize the step sizes for the latter.
Jinshan Zeng & Wotao Yin. (2017). ExtraPush for Convex Smooth Decentralized Optimization Over Directed Networks. Journal of Computational Mathematics. 35 (4). 383-396. doi:10.4208/jcm.1606-m2015-0452
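The abstract describes ExtraPush as an EXTRA-style fixed-step correction combined with push-sum weights, which is what lets a column-stochastic (rather than doubly stochastic) mixing matrix be used over a directed network. As a reading aid only, below is a minimal NumPy sketch of that structure. The update formulas are a reconstruction from the abstract's description, not the paper's authoritative statement, and the names `extrapush`, `grad`, `alpha`, and `iters` are illustrative; consult the article for the exact algorithm and its assumptions.

```python
import numpy as np

def extrapush(grad, A, x0, alpha, iters=500):
    """Sketch of an ExtraPush-style iteration (reconstruction; see note above).

    grad(X) -> n-by-d matrix whose i-th row is the gradient of f_i at X[i].
    A       -> n-by-n column-stochastic mixing matrix of the directed network.
    x0      -> n-by-d initial iterates, one row per node.
    alpha   -> fixed step size (possible thanks to the EXTRA-style correction).
    """
    n = A.shape[0]
    A_bar = 0.5 * (A + np.eye(n))      # averaged mixing matrix (I + A) / 2
    w = np.ones((n, 1))                # push-sum weights, w^0 = all ones
    z_prev, x_prev = x0.copy(), x0.copy()
    z = A @ z_prev - alpha * grad(x_prev)   # z^1: one mixed gradient step
    w = A @ w                               # w^1 = A w^0
    x = z / w                               # x^1 = z^1 / w^1, rowwise
    for _ in range(iters - 1):
        # EXTRA-style two-term recursion on z, then push-sum normalization:
        # z^{k+1} = z^k + A z^k - A_bar z^{k-1} - alpha (grad(x^k) - grad(x^{k-1}))
        z_next = z + A @ z - A_bar @ z_prev - alpha * (grad(x) - grad(x_prev))
        w = A @ w                           # w^{k+1} = A w^k
        z_prev, x_prev = z, x
        z, x = z_next, z_next / w           # x^{k+1} = z^{k+1} / w^{k+1}
    return x                                # rows should agree near consensus
```

In this reading, each node only needs to split its outgoing mass among its out-neighbors (e.g., by out-degree) to make A column-stochastic, which is what removes the doubly stochastic requirement that ties Extra to undirected networks.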