Self-supervision as an emerging technique has been employed to train convolutional neural networks (CNNs) for more transferrable, generalizable, and robust representation learning of images. Its introduction to graph convolutional networks (GCNs) operating on graph data is however rarely explored. In this study, we report the first systematic exploration and assessment of incorporating self-supervision into GCNs. We first elaborate three mechanisms to incorporate self-supervision into GCNs, analyze the limitations of pretraining & finetuning and self-training, and proceed to focus on multi-task learning. Moreover, we propose to investigate three novel self-supervised learning tasks for GCNs with theoretical rationales and numerical comparisons. Lastly, we further integrate multi-task self-supervision into graph adversarial training. Our results show that, with properly designed task forms and incorporation mechanisms, self-supervision benefits GCNs in gaining more generalizability and robustness. Our codes are available at https://github.com/Shen-Lab/SS-GCNs.
Comment: Supplementary materials are available at https://yyou1996.github.io/files/icml2020\_ssgcn\_supplement.pdf. ICML 2020
language
en
file
You 等。 - 2020 - When Does Self-Supervision Help Graph Convolutiona.pdf:files/42/You 等。 - 2020 - When Does Self-Supervision Help Graph Convolutiona.pdf:application/pdf
%0 Conference Paper
%1 you_when_2020
%A You, Yuning
%A Chen, Tianlong
%A Wang, Zhangyang
%A Shen, Yang
%B arXiv:2006.09136 cs, stat
%D 2020
%K multi-task ssl gnn
%T When Does Self-Supervision Help Graph Convolutional Networks?
%U http://arxiv.org/abs/2006.09136
%X Self-supervision as an emerging technique has been employed to train convolutional neural networks (CNNs) for more transferrable, generalizable, and robust representation learning of images. Its introduction to graph convolutional networks (GCNs) operating on graph data is however rarely explored. In this study, we report the first systematic exploration and assessment of incorporating self-supervision into GCNs. We first elaborate three mechanisms to incorporate self-supervision into GCNs, analyze the limitations of pretraining & finetuning and self-training, and proceed to focus on multi-task learning. Moreover, we propose to investigate three novel self-supervised learning tasks for GCNs with theoretical rationales and numerical comparisons. Lastly, we further integrate multi-task self-supervision into graph adversarial training. Our results show that, with properly designed task forms and incorporation mechanisms, self-supervision benefits GCNs in gaining more generalizability and robustness. Our codes are available at https://github.com/Shen-Lab/SS-GCNs.
%Z Comment: Supplementary materials are available at https://yyou1996.github.io/files/icml2020\_ssgcn\_supplement.pdf. ICML 2020
@inproceedings{you_when_2020,
  abstract    = {Self-supervision as an emerging technique has been employed to train convolutional neural networks (CNNs) for more transferrable, generalizable, and robust representation learning of images. Its introduction to graph convolutional networks (GCNs) operating on graph data is however rarely explored. In this study, we report the first systematic exploration and assessment of incorporating self-supervision into GCNs. We first elaborate three mechanisms to incorporate self-supervision into GCNs, analyze the limitations of pretraining \& finetuning and self-training, and proceed to focus on multi-task learning. Moreover, we propose to investigate three novel self-supervised learning tasks for GCNs with theoretical rationales and numerical comparisons. Lastly, we further integrate multi-task self-supervision into graph adversarial training. Our results show that, with properly designed task forms and incorporation mechanisms, self-supervision benefits GCNs in gaining more generalizability and robustness. Our codes are available at https://github.com/Shen-Lab/SS-GCNs.},
  added-at    = {2021-04-08T07:18:22.000+0200},
  annote      = {Comment: Supplementary materials are available at https://yyou1996.github.io/files/icml2020\_ssgcn\_supplement.pdf. ICML 2020},
  author      = {You, Yuning and Chen, Tianlong and Wang, Zhangyang and Shen, Yang},
  biburl      = {https://www.bibsonomy.org/bibtex/2908800d9dfa9c426d03a1e3f657ea6c4/mengcao},
  booktitle   = {Proceedings of the 37th International Conference on Machine Learning ({ICML})},
  eprint      = {2006.09136},
  eprinttype  = {arXiv},
  eprintclass = {cs.LG},
  file        = {You 等。 - 2020 - When Does Self-Supervision Help Graph Convolutiona.pdf:files/42/You 等。 - 2020 - When Does Self-Supervision Help Graph Convolutiona.pdf:application/pdf},
  interhash   = {0e3deac59b27ce2d51f4ce2daf8c17d8},
  intrahash   = {908800d9dfa9c426d03a1e3f657ea6c4},
  keywords    = {multi-task ssl gnn},
  language    = {en},
  month       = jul,
  timestamp   = {2021-04-25T13:21:04.000+0200},
  title       = {When Does Self-Supervision Help {Graph} {Convolutional} {Networks}?},
  url         = {http://arxiv.org/abs/2006.09136},
  urldate     = {2020-11-29},
  year        = {2020},
}