Deep neural networks have been able to outperform humans in some cases like
image recognition and image classification. However, with the emergence of
various novel categories, the ability to continuously widen the learning
capability of such networks from limited samples still remains a challenge.
Techniques like Meta-Learning and/or few-shot learning showed promising
results, where they can learn or generalize to a novel category/task based on
prior knowledge. In this paper, we perform a study of the existing few-shot
meta-learning techniques in the computer vision domain based on their method
and evaluation metrics. We provide a taxonomy for the techniques and categorize
them as data-augmentation, embedding, optimization and semantics based learning
for few-shot, one-shot and zero-shot settings. We then describe the seminal
work done in each category and discuss their approach towards solving the
predicament of learning from few samples. Lastly, we provide a comparison of
these techniques on the commonly used benchmark datasets: Omniglot and
MiniImagenet, along with a discussion towards the future direction of improving
the performance of these techniques towards the final goal of outperforming
humans.
%0 Generic
%1 bendre2020learning
%A Bendre, Nihar
%A Marín, Hugo Terashima
%A Najafirad, Peyman
%D 2020
%K 2020 deep-learning few-shot survey
%T Learning from Few Samples: A Survey
%U http://arxiv.org/abs/2007.15484
%X Deep neural networks have been able to outperform humans in some cases like
image recognition and image classification. However, with the emergence of
various novel categories, the ability to continuously widen the learning
capability of such networks from limited samples, still remains a challenge.
Techniques like Meta-Learning and/or few-shot learning showed promising
results, where they can learn or generalize to a novel category/task based on
prior knowledge. In this paper, we perform a study of the existing few-shot
meta-learning techniques in the computer vision domain based on their method
and evaluation metrics. We provide a taxonomy for the techniques and categorize
them as data-augmentation, embedding, optimization and semantics based learning
for few-shot, one-shot and zero-shot settings. We then describe the seminal
work done in each category and discuss their approach towards solving the
predicament of learning from few samples. Lastly we provide a comparison of
these techniques on the commonly used benchmark datasets: Omniglot, and
MiniImagenet, along with a discussion towards the future direction of improving
the performance of these techniques towards the final goal of outperforming
humans.
@misc{bendre2020learning,
  abstract      = {Deep neural networks have been able to outperform humans in some cases like
                   image recognition and image classification. However, with the emergence of
                   various novel categories, the ability to continuously widen the learning
                   capability of such networks from limited samples, still remains a challenge.
                   Techniques like Meta-Learning and/or few-shot learning showed promising
                   results, where they can learn or generalize to a novel category/task based on
                   prior knowledge. In this paper, we perform a study of the existing few-shot
                   meta-learning techniques in the computer vision domain based on their method
                   and evaluation metrics. We provide a taxonomy for the techniques and categorize
                   them as data-augmentation, embedding, optimization and semantics based learning
                   for few-shot, one-shot and zero-shot settings. We then describe the seminal
                   work done in each category and discuss their approach towards solving the
                   predicament of learning from few samples. Lastly we provide a comparison of
                   these techniques on the commonly used benchmark datasets: Omniglot, and
                   MiniImagenet, along with a discussion towards the future direction of improving
                   the performance of these techniques towards the final goal of outperforming
                   humans.},
  added-at      = {2020-08-01T13:36:26.000+0200},
  archiveprefix = {arXiv},
  author        = {Bendre, Nihar and Terashima Mar{\'\i}n, Hugo and Najafirad, Peyman},
  biburl        = {https://www.bibsonomy.org/bibtex/217a058e479def71a5c5d80234f18092c/analyst},
  description   = {[2007.15484] Learning from Few Samples: A Survey},
  eprint        = {2007.15484},
  interhash     = {0028db52d46c02fab700afd47e1ca4a8},
  intrahash     = {17a058e479def71a5c5d80234f18092c},
  keywords      = {2020 deep-learning few-shot survey},
  note          = {17 pages, 10 figures},
  timestamp     = {2020-08-01T18:28:32.000+0200},
  title         = {Learning from Few Samples: A Survey},
  url           = {http://arxiv.org/abs/2007.15484},
  year          = {2020},
}