Self-supervision provides effective representations for downstream tasks
without requiring labels. However, existing approaches lag behind fully
supervised training and are often not thought beneficial beyond obviating the
need for annotations. We find that self-supervision can benefit robustness in a
variety of ways, including robustness to adversarial examples, label
corruption, and common input corruptions. Additionally, self-supervision
greatly benefits out-of-distribution detection on difficult, near-distribution
outliers, so much so that it exceeds the performance of fully supervised
methods. These results demonstrate the promise of self-supervision for
improving robustness and uncertainty estimation and establish these tasks as
new axes of evaluation for future self-supervised learning research.
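The approach behind these results, per the authors' released code (https://github.com/hendrycks/ss-ood), adds an auxiliary self-supervised rotation-prediction loss to standard training: each image is rotated by 0, 90, 180, or 270 degrees, and an auxiliary head learns to predict the rotation alongside the usual classification loss. A minimal PyTorch sketch of this combined objective follows; the tiny backbone, the head names, and the loss weight rot_weight are illustrative assumptions, not the paper's exact implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

def rotate_batch(x):
    """Return the batch rotated by 0/90/180/270 degrees plus rotation labels.

    x: (B, C, H, W) -> images (4B, C, H, W), labels (4B,) in {0, 1, 2, 3}.
    """
    rotations = [torch.rot90(x, k, dims=(2, 3)) for k in range(4)]
    labels = torch.arange(4).repeat_interleave(x.size(0))
    return torch.cat(rotations, dim=0), labels

class SSModel(nn.Module):
    """Shared backbone with a classification head and a rotation head.

    The small conv backbone is a stand-in; the paper uses standard
    architectures such as Wide ResNets.
    """
    def __init__(self, num_classes=10):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv2d(3, 32, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
        self.class_head = nn.Linear(32, num_classes)
        self.rot_head = nn.Linear(32, 4)  # predicts one of the 4 rotations

    def forward(self, x):
        h = self.backbone(x)
        return self.class_head(h), self.rot_head(h)

def training_loss(model, x, y, rot_weight=0.5):
    """Supervised cross-entropy plus a weighted rotation-prediction loss.

    rot_weight is an assumed hyperparameter, not the paper's tuned value.
    """
    x_rot, y_rot = rotate_batch(x)
    logits_cls, _ = model(x)       # class logits on the clean inputs
    _, logits_rot = model(x_rot)   # rotation logits on the rotated copies
    return F.cross_entropy(logits_cls, y) + rot_weight * F.cross_entropy(logits_rot, y_rot)

# Usage sketch on random data:
model = SSModel()
x = torch.randn(8, 3, 32, 32)
y = torch.randint(0, 10, (8,))
loss = training_loss(model, x, y)
loss.backward()

Sharing the backbone while keeping separate linear heads is the standard way to attach such an auxiliary task: the classifier's interface is unchanged, but the shared features must also capture the geometric structure the rotation task demands.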
@article{hendrycks2019using,
author = {Hendrycks, Dan and Mazeika, Mantas and Kadavath, Saurav and Song, Dawn},
biburl = {https://www.bibsonomy.org/bibtex/2193f9b2deb6061eb556a6a0d750c4761/kirk86},
keywords = {robustness uncertainty},
note = {arXiv:1906.12340. Code and dataset available at https://github.com/hendrycks/ss-ood},
title = {Using Self-Supervised Learning Can Improve Model Robustness and Uncertainty},
url = {http://arxiv.org/abs/1906.12340},
year = 2019
}