The concept of dimension is essential to grasp the complexity of data. A
naive approach to determine the dimension of a dataset is based on the number
of attributes. More sophisticated methods derive a notion of intrinsic
dimension (ID) that employs more complex feature functions, e.g., distances
between data points. Yet, many of these approaches are based on empirical
observations, cannot cope with the geometric character of contemporary
datasets, and do lack an axiomatic foundation. A different approach was
proposed by V. Pestov, who links the intrinsic dimension axiomatically to the
mathematical concentration of measure phenomenon. First methods to compute this
and related notions for ID were computationally intractable for large-scale
real-world datasets. In the present work, we derive a computationally feasible
method for determining said axiomatic ID functions. Moreover, we demonstrate
how the geometric properties of complex data are accounted for in our modeling.
In particular, we propose a principled way to incorporate neighborhood
information, as in graph data, into the ID. This allows for new insights into
common graph learning procedures, which we illustrate by experiments on the
Open Graph Benchmark.
Description
Intrinsic Dimension for Large-Scale Geometric Learning
%0 Journal Article
%1 stubbemann2022intrinsic
%A Stubbemann, Maximilian
%A Hanika, Tom
%A Schneider, Friedrich Martin
%D 2023
%J Transactions on Machine Learning Research
%K 2023 LOEWE dimcurse dimension intrinsic itegpub kde kdepub myown publist
%T Intrinsic Dimension for Large-Scale Geometric Learning
%U https://openreview.net/forum?id=85BfDdYMBY
%X The concept of dimension is essential to grasp the complexity of data. A
naive approach to determine the dimension of a dataset is based on the number
of attributes. More sophisticated methods derive a notion of intrinsic
dimension (ID) that employs more complex feature functions, e.g., distances
between data points. Yet, many of these approaches are based on empirical
observations, cannot cope with the geometric character of contemporary
datasets, and do lack an axiomatic foundation. A different approach was
proposed by V. Pestov, who links the intrinsic dimension axiomatically to the
mathematical concentration of measure phenomenon. First methods to compute this
and related notions for ID were computationally intractable for large-scale
real-world datasets. In the present work, we derive a computationally feasible
method for determining said axiomatic ID functions. Moreover, we demonstrate
how the geometric properties of complex data are accounted for in our modeling.
In particular, we propose a principled way to incorporate neighborhood
information, as in graph data, into the ID. This allows for new insights into
common graph learning procedures, which we illustrate by experiments on the
Open Graph Benchmark.
@article{stubbemann2022intrinsic,
  author      = {Stubbemann, Maximilian and Hanika, Tom and Schneider, Friedrich Martin},
  title       = {Intrinsic Dimension for Large-Scale Geometric Learning},
  journal     = {Transactions on Machine Learning Research},
  year        = 2023,
  issn        = {2835-8856},
  url         = {https://openreview.net/forum?id=85BfDdYMBY},
  abstract    = {The concept of dimension is essential to grasp the complexity of data. A
naive approach to determine the dimension of a dataset is based on the number
of attributes. More sophisticated methods derive a notion of intrinsic
dimension (ID) that employs more complex feature functions, e.g., distances
between data points. Yet, many of these approaches are based on empirical
observations, cannot cope with the geometric character of contemporary
datasets, and do lack an axiomatic foundation. A different approach was
proposed by V. Pestov, who links the intrinsic dimension axiomatically to the
mathematical concentration of measure phenomenon. First methods to compute this
and related notions for ID were computationally intractable for large-scale
real-world datasets. In the present work, we derive a computationally feasible
method for determining said axiomatic ID functions. Moreover, we demonstrate
how the geometric properties of complex data are accounted for in our modeling.
In particular, we propose a principle way to incorporate neighborhood
information, as in graph data, into the ID. This allows for new insights into
common graph learning procedures, which we illustrate by experiments on the
Open Graph Benchmark.},
  keywords    = {2023 LOEWE dimcurse dimension intrinsic itegpub kde kdepub myown publist},
  description = {Intrinsic Dimension for Large-Scale Geometric Learning},
  added-at    = {2022-10-12T10:58:53.000+0200},
  timestamp   = {2023-04-16T20:56:44.000+0200},
  biburl      = {https://www.bibsonomy.org/bibtex/2292ea436563609314995751c9edb3c43/tomhanika},
  interhash   = {cacd06fa89984f79f4512e2e7b15b49a},
  intrahash   = {292ea436563609314995751c9edb3c43},
}