B. Shaw and T. Jebara. Proceedings of the 26th Annual International Conference on Machine Learning, pages 937--944. New York, NY, USA, ACM, (2009)
DOI: 10.1145/1553374.1553494
Abstract
Structure Preserving Embedding (SPE) is an algorithm for embedding graphs in Euclidean space such that the embedding is low-dimensional and preserves the global topological properties of the input graph. Topology is preserved if a connectivity algorithm, such as k-nearest neighbors, can easily recover the edges of the input graph from only the coordinates of the nodes after embedding. SPE is formulated as a semidefinite program that learns a low-rank kernel matrix constrained by a set of linear inequalities which captures the connectivity structure of the input graph. Traditional graph embedding algorithms do not preserve structure according to our definition, and thus the resulting visualizations can be misleading or less informative. SPE provides significant improvements in terms of visualization and lossless compression of graphs, outperforming popular methods such as spectral embedding and Laplacian eigen-maps. We find that many classical graphs and networks can be properly embedded using only a few dimensions. Furthermore, introducing structure preserving constraints into dimensionality reduction algorithms produces more accurate representations of high-dimensional data.
%0 Conference Paper
%1 Shaw:2009:SPE:1553374.1553494
%A Shaw, Blake
%A Jebara, Tony
%B Proceedings of the 26th Annual International Conference on Machine Learning
%C New York, NY, USA
%D 2009
%I ACM
%K embedding preserving structure
%P 937--944
%R 10.1145/1553374.1553494
%T Structure Preserving Embedding
%U http://doi.acm.org/10.1145/1553374.1553494
%X Structure Preserving Embedding (SPE) is an algorithm for embedding graphs in Euclidean space such that the embedding is low-dimensional and preserves the global topological properties of the input graph. Topology is preserved if a connectivity algorithm, such as k-nearest neighbors, can easily recover the edges of the input graph from only the coordinates of the nodes after embedding. SPE is formulated as a semidefinite program that learns a low-rank kernel matrix constrained by a set of linear inequalities which captures the connectivity structure of the input graph. Traditional graph embedding algorithms do not preserve structure according to our definition, and thus the resulting visualizations can be misleading or less informative. SPE provides significant improvements in terms of visualization and lossless compression of graphs, outperforming popular methods such as spectral embedding and Laplacian eigen-maps. We find that many classical graphs and networks can be properly embedded using only a few dimensions. Furthermore, introducing structure preserving constraints into dimensionality reduction algorithms produces more accurate representations of high-dimensional data.
%@ 978-1-60558-516-1
@inproceedings{Shaw:2009:SPE:1553374.1553494,
  abstract  = {Structure Preserving Embedding (SPE) is an algorithm for embedding graphs in Euclidean space such that the embedding is low-dimensional and preserves the global topological properties of the input graph. Topology is preserved if a connectivity algorithm, such as k-nearest neighbors, can easily recover the edges of the input graph from only the coordinates of the nodes after embedding. SPE is formulated as a semidefinite program that learns a low-rank kernel matrix constrained by a set of linear inequalities which captures the connectivity structure of the input graph. Traditional graph embedding algorithms do not preserve structure according to our definition, and thus the resulting visualizations can be misleading or less informative. SPE provides significant improvements in terms of visualization and lossless compression of graphs, outperforming popular methods such as spectral embedding and Laplacian eigen-maps. We find that many classical graphs and networks can be properly embedded using only a few dimensions. Furthermore, introducing structure preserving constraints into dimensionality reduction algorithms produces more accurate representations of high-dimensional data.},
  acmid     = {1553494},
  added-at  = {2017-03-20T17:15:16.000+0100},
  address   = {New York, NY, USA},
  author    = {Shaw, Blake and Jebara, Tony},
  biburl    = {https://www.bibsonomy.org/bibtex/2dc2b791ff497be7682994f283c0777f7/thoni},
  booktitle = {Proceedings of the 26th Annual International Conference on Machine Learning},
  doi       = {10.1145/1553374.1553494},
  interhash = {e24515bc82063e2ed55838863e901f4d},
  intrahash = {dc2b791ff497be7682994f283c0777f7},
  isbn      = {978-1-60558-516-1},
  keywords  = {embedding preserving structure},
  location  = {Montreal, Quebec, Canada},
  numpages  = {8},
  pages     = {937--944},
  publisher = {ACM},
  series    = {ICML '09},
  timestamp = {2017-03-20T17:15:16.000+0100},
  title     = {Structure Preserving Embedding},
  url       = {https://doi.org/10.1145/1553374.1553494},
  year      = {2009},
}