We introduce a self-supervised approach for learning node and graph level
representations by contrasting structural views of graphs. We show that unlike
visual representation learning, increasing the number of views to more than two
or contrasting multi-scale encodings do not improve performance, and the best
performance is achieved by contrasting encodings from first-order neighbors and
a graph diffusion. We achieve new state-of-the-art results in self-supervised
learning on 8 out of 8 node and graph classification benchmarks under the
linear evaluation protocol. For example, on Cora (node) and Reddit-Binary
(graph) classification benchmarks, we achieve 86.8% and 84.5% accuracy, which
are 5.5% and 2.4% relative improvements over previous state-of-the-art. When
compared to supervised baselines, our approach outperforms them in 4 out of 8
benchmarks. Source code is released at: https://github.com/kavehhassani/mvgrl
Description
[2006.05582] Contrastive Multi-View Representation Learning on Graphs
%0 Generic
%1 hassani2020contrastive
%A Hassani, Kaveh
%A Khasahmadi, Amir Hosein
%D 2020
%K 2020 deep-learning graph multi-view
%T Contrastive Multi-View Representation Learning on Graphs
%U http://arxiv.org/abs/2006.05582
%X We introduce a self-supervised approach for learning node and graph level
representations by contrasting structural views of graphs. We show that unlike
visual representation learning, increasing the number of views to more than two
or contrasting multi-scale encodings do not improve performance, and the best
performance is achieved by contrasting encodings from first-order neighbors and
a graph diffusion. We achieve new state-of-the-art results in self-supervised
learning on 8 out of 8 node and graph classification benchmarks under the
linear evaluation protocol. For example, on Cora (node) and Reddit-Binary
(graph) classification benchmarks, we achieve 86.8% and 84.5% accuracy, which
are 5.5% and 2.4% relative improvements over previous state-of-the-art. When
compared to supervised baselines, our approach outperforms them in 4 out of 8
benchmarks. Source code is released at: https://github.com/kavehhassani/mvgrl
@inproceedings{hassani2020contrastive,
  abstract      = {We introduce a self-supervised approach for learning node and graph level
representations by contrasting structural views of graphs. We show that unlike
visual representation learning, increasing the number of views to more than two
or contrasting multi-scale encodings do not improve performance, and the best
performance is achieved by contrasting encodings from first-order neighbors and
a graph diffusion. We achieve new state-of-the-art results in self-supervised
learning on 8 out of 8 node and graph classification benchmarks under the
linear evaluation protocol. For example, on Cora (node) and Reddit-Binary
(graph) classification benchmarks, we achieve 86.8% and 84.5% accuracy, which
are 5.5% and 2.4% relative improvements over previous state-of-the-art. When
compared to supervised baselines, our approach outperforms them in 4 out of 8
benchmarks. Source code is released at: https://github.com/kavehhassani/mvgrl},
  added-at      = {2021-05-06T22:24:43.000+0200},
  author        = {Hassani, Kaveh and Khasahmadi, Amir Hosein},
  biburl        = {https://www.bibsonomy.org/bibtex/243ced241345a55ff02d255315c372ecb/analyst},
  booktitle     = {Proceedings of the 37th International Conference on Machine Learning ({ICML})},
  description   = {[2006.05582] Contrastive Multi-View Representation Learning on Graphs},
  eprint        = {2006.05582},
  eprinttype    = {arXiv},
  interhash     = {50cac68503e26dad993f74f67f4331c7},
  intrahash     = {43ced241345a55ff02d255315c372ecb},
  keywords      = {2020 deep-learning graph multi-view},
  publisher     = {PMLR},
  timestamp     = {2021-05-06T22:24:43.000+0200},
  title         = {Contrastive Multi-View Representation Learning on Graphs},
  url           = {http://arxiv.org/abs/2006.05582},
  year          = {2020}
}