Generalizable, transferrable, and robust representation learning on graph-structured data remains a challenge for current graph neural networks (GNNs). Unlike what has been developed for convolutional neural networks (CNNs) for image data, self-supervised learning and pre-training are less explored for GNNs. In this paper, we propose a graph contrastive learning (GraphCL) framework for learning unsupervised representations of graph data. We first design four types of graph augmentations to incorporate various priors. We then systematically study the impact of various combinations of graph augmentations on multiple datasets, in four different settings: semi-supervised, unsupervised, and transfer learning as well as adversarial attacks. The results show that, even without tuning augmentation extents nor using sophisticated GNN architectures, our GraphCL framework can produce graph representations of similar or better generalizability, transferrability, and robustness compared to state-of-the-art methods. We also investigate the impact of parameterized graph augmentation extents and patterns, and observe further performance gains in preliminary experiments. Our codes are available at: https://github.com/Shen-Lab/GraphCL.
%0 Conference Paper
%1 you_graph_2020
%A You, Yuning
%A Chen, Tianlong
%A Sui, Yongduo
%A Chen, Ting
%A Wang, Zhangyang
%A Shen, Yang
%B arXiv:2010.13902 [cs]
%D 2020
%K ssl gnn
%T Graph Contrastive Learning with Augmentations
%U http://arxiv.org/abs/2010.13902
%X Generalizable, transferrable, and robust representation learning on graph-structured data remains a challenge for current graph neural networks (GNNs). Unlike what has been developed for convolutional neural networks (CNNs) for image data, self-supervised learning and pre-training are less explored for GNNs. In this paper, we propose a graph contrastive learning (GraphCL) framework for learning unsupervised representations of graph data. We first design four types of graph augmentations to incorporate various priors. We then systematically study the impact of various combinations of graph augmentations on multiple datasets, in four different settings: semi-supervised, unsupervised, and transfer learning as well as adversarial attacks. The results show that, even without tuning augmentation extents nor using sophisticated GNN architectures, our GraphCL framework can produce graph representations of similar or better generalizability, transferrability, and robustness compared to state-of-the-art methods. We also investigate the impact of parameterized graph augmentation extents and patterns, and observe further performance gains in preliminary experiments. Our codes are available at: https://github.com/Shen-Lab/GraphCL.
%Z Comment: Supplementary materials are available at https://yyou1996.github.io/files/neurips2020_graphcl_supplement.pdf. NeurIPS 2020
@inproceedings{you_graph_2020,
  abstract    = {Generalizable, transferrable, and robust representation learning on graph-structured data remains a challenge for current graph neural networks (GNNs). Unlike what has been developed for convolutional neural networks (CNNs) for image data, self-supervised learning and pre-training are less explored for GNNs. In this paper, we propose a graph contrastive learning (GraphCL) framework for learning unsupervised representations of graph data. We first design four types of graph augmentations to incorporate various priors. We then systematically study the impact of various combinations of graph augmentations on multiple datasets, in four different settings: semi-supervised, unsupervised, and transfer learning as well as adversarial attacks. The results show that, even without tuning augmentation extents nor using sophisticated GNN architectures, our GraphCL framework can produce graph representations of similar or better generalizability, transferrability, and robustness compared to state-of-the-art methods. We also investigate the impact of parameterized graph augmentation extents and patterns, and observe further performance gains in preliminary experiments. Our codes are available at: https://github.com/Shen-Lab/GraphCL.},
  added-at    = {2021-04-08T07:18:22.000+0200},
  annote      = {Comment: Supplementary materials are available at https://yyou1996.github.io/files/neurips2020\_graphcl\_supplement.pdf. NeurIPS 2020},
  author      = {You, Yuning and Chen, Tianlong and Sui, Yongduo and Chen, Ting and Wang, Zhangyang and Shen, Yang},
  biburl      = {https://www.bibsonomy.org/bibtex/23c3f34ed8b6d3a5874742424f005b226/mengcao},
  booktitle   = {Advances in Neural Information Processing Systems},
  eprint      = {2010.13902},
  eprinttype  = {arXiv},
  eprintclass = {cs.LG},
  file        = {You 等。 - 2020 - Graph Contrastive Learning with Augmentations.pdf:files/50/You 等。 - 2020 - Graph Contrastive Learning with Augmentations.pdf:application/pdf},
  interhash   = {35669cdcc7816cf316e7f87781bda16b},
  intrahash   = {3c3f34ed8b6d3a5874742424f005b226},
  keywords    = {ssl gnn},
  language    = {en},
  month       = dec,
  timestamp   = {2021-04-25T13:21:04.000+0200},
  title       = {Graph Contrastive Learning with Augmentations},
  url         = {http://arxiv.org/abs/2010.13902},
  urldate     = {2020-11-29},
  year        = {2020},
}