@Article{JML-2-259, author = {Wu, Lei}, title = {Embedding Inequalities for Barron-Type Spaces}, journal = {Journal of Machine Learning}, year = {2023}, volume = {2}, number = {4}, pages = {259--270}, abstract = {

An important problem in machine learning theory is to understand the approximation and generalization properties of two-layer neural networks in high dimensions. To this end, researchers have introduced the Barron space $\mathcal{B}_s(\Omega)$ and the spectral Barron space $\mathcal{F}_s(\Omega)$, where the index $s \in [0, \infty)$ indicates the smoothness of functions within these spaces and $\Omega \subset \mathbb{R}^d$ denotes the input domain. However, the precise relationship between the two types of Barron spaces remains unclear. In this paper, we establish a continuous embedding between them, as implied by the following inequality: for any $\delta \in (0, 1)$, $s \in \mathbb{N}^+$, and $f : \Omega \to \mathbb{R}$, it holds that $$\delta \|f\|_{\mathcal{F}_{s-\delta}(\Omega)} \lesssim_s \|f\|_{\mathcal{B}_s(\Omega)} \lesssim_s \|f\|_{\mathcal{F}_{s+1}(\Omega)}.$$ Importantly, the constants do not depend on the input dimension $d$, suggesting that the embedding is effective in high dimensions. Moreover, we show that both the lower and upper bounds are tight.

}, issn = {2790-2048}, doi = {10.4208/jml.230530}, url = {http://global-sci.org/intro/article_detail/jml/22307.html} }