We show that passing input points through a simple Fourier feature mapping
enables a multilayer perceptron (MLP) to learn high-frequency functions in
low-dimensional problem domains. These results shed light on recent advances in
computer vision and graphics that achieve state-of-the-art results by using
MLPs to represent complex 3D objects and scenes. Using tools from the neural
tangent kernel (NTK) literature, we show that a standard MLP fails to learn
high frequencies both in theory and in practice. To overcome this spectral
bias, we use a Fourier feature mapping to transform the effective NTK into a
stationary kernel with a tunable bandwidth. We suggest an approach for
selecting problem-specific Fourier features that greatly improves the
performance of MLPs for low-dimensional regression tasks relevant to the
computer vision and graphics communities.
Description
Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains
%0 Generic
%1 tancik2020fourier
%A Tancik, Matthew
%A Srinivasan, Pratul P.
%A Mildenhall, Ben
%A Fridovich-Keil, Sara
%A Raghavan, Nithin
%A Singhal, Utkarsh
%A Ramamoorthi, Ravi
%A Barron, Jonathan T.
%A Ng, Ren
%D 2020
%K nerf
%T Fourier Features Let Networks Learn High Frequency Functions in Low
Dimensional Domains
%U http://arxiv.org/abs/2006.10739
%X We show that passing input points through a simple Fourier feature mapping
enables a multilayer perceptron (MLP) to learn high-frequency functions in
low-dimensional problem domains. These results shed light on recent advances in
computer vision and graphics that achieve state-of-the-art results by using
MLPs to represent complex 3D objects and scenes. Using tools from the neural
tangent kernel (NTK) literature, we show that a standard MLP fails to learn
high frequencies both in theory and in practice. To overcome this spectral
bias, we use a Fourier feature mapping to transform the effective NTK into a
stationary kernel with a tunable bandwidth. We suggest an approach for
selecting problem-specific Fourier features that greatly improves the
performance of MLPs for low-dimensional regression tasks relevant to the
computer vision and graphics communities.
% arXiv preprint (NeurIPS 2020 paper; stored here as exported from BibSonomy).
% The arXiv identifier now lives in eprint/archiveprefix fields instead of being
% stuffed into the note; the note carries only the project-page pointer.
% "Fourier" is braced in the title so sentence-casing styles keep its capital.
@misc{tancik2020fourier,
  author        = {Tancik, Matthew and Srinivasan, Pratul P. and Mildenhall, Ben and Fridovich-Keil, Sara and Raghavan, Nithin and Singhal, Utkarsh and Ramamoorthi, Ravi and Barron, Jonathan T. and Ng, Ren},
  title         = {{Fourier} Features Let Networks Learn High Frequency Functions in Low Dimensional Domains},
  year          = {2020},
  eprint        = {2006.10739},
  archiveprefix = {arXiv},
  url           = {http://arxiv.org/abs/2006.10739},
  note          = {Project page: \url{https://people.eecs.berkeley.edu/~bmild/fourfeat/}},
  abstract      = {We show that passing input points through a simple Fourier feature mapping
enables a multilayer perceptron (MLP) to learn high-frequency functions in
low-dimensional problem domains. These results shed light on recent advances in
computer vision and graphics that achieve state-of-the-art results by using
MLPs to represent complex 3D objects and scenes. Using tools from the neural
tangent kernel (NTK) literature, we show that a standard MLP fails to learn
high frequencies both in theory and in practice. To overcome this spectral
bias, we use a Fourier feature mapping to transform the effective NTK into a
stationary kernel with a tunable bandwidth. We suggest an approach for
selecting problem-specific Fourier features that greatly improves the
performance of MLPs for low-dimensional regression tasks relevant to the
computer vision and graphics communities.},
  keywords      = {nerf},
  description   = {Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains},
  added-at      = {2022-08-18T10:18:03.000+0200},
  timestamp     = {2022-08-18T10:18:03.000+0200},
  biburl        = {https://www.bibsonomy.org/bibtex/28995d4ed9e9875d0a373a92c1905302d/m_gabriel},
  interhash     = {6fed9f77dea680e6ab7d7f159db8a3d3},
  intrahash     = {8995d4ed9e9875d0a373a92c1905302d},
}