In coming to understand the world—in learning concepts, acquiring language, and grasping causal relations—our minds make inferences that appear to go far beyond the data available. How do we do it? This review describes recent approaches to reverse-engineering human learning and cognitive development and, in parallel, engineering more humanlike machine learning systems. Computational models that perform probabilistic inference over hierarchies of flexibly structured representations can address some of the deepest questions about the nature and origins of human thought: How does abstract knowledge guide learning and reasoning from sparse data? What forms does our knowledge take, across different domains and tasks? And how is that abstract knowledge itself acquired?
Description
How to Grow a Mind: Statistics, Structure, and Abstraction | Science
%0 Journal Article
%1 Tenenbaum1279
%A Tenenbaum, Joshua B.
%A Kemp, Charles
%A Griffiths, Thomas L.
%A Goodman, Noah D.
%D 2011
%I American Association for the Advancement of Science
%J Science
%K 2011 magazine mind probability science statistics
%N 6022
%P 1279--1285
%R 10.1126/science.1192788
%T How to Grow a Mind: Statistics, Structure, and Abstraction
%U http://science.sciencemag.org/content/331/6022/1279
%V 331
%X In coming to understand the world—in learning concepts, acquiring language, and grasping causal relations—our minds make inferences that appear to go far beyond the data available. How do we do it? This review describes recent approaches to reverse-engineering human learning and cognitive development and, in parallel, engineering more humanlike machine learning systems. Computational models that perform probabilistic inference over hierarchies of flexibly structured representations can address some of the deepest questions about the nature and origins of human thought: How does abstract knowledge guide learning and reasoning from sparse data? What forms does our knowledge take, across different domains and tasks? And how is that abstract knowledge itself acquired?
@article{Tenenbaum1279,
  author      = {Tenenbaum, Joshua B. and Kemp, Charles and Griffiths, Thomas L. and Goodman, Noah D.},
  title       = {How to Grow a Mind: Statistics, Structure, and Abstraction},
  journal     = {Science},
  year        = {2011},
  volume      = {331},
  number      = {6022},
  pages       = {1279--1285},
  publisher   = {American Association for the Advancement of Science},
  issn        = {0036-8075},
  doi         = {10.1126/science.1192788},
  url         = {http://science.sciencemag.org/content/331/6022/1279},
  eprint      = {http://science.sciencemag.org/content/331/6022/1279.full.pdf},
  abstract    = {In coming to understand the world{\textemdash}in learning concepts, acquiring language, and grasping causal relations{\textemdash}our minds make inferences that appear to go far beyond the data available. How do we do it? This review describes recent approaches to reverse-engineering human learning and cognitive development and, in parallel, engineering more humanlike machine learning systems. Computational models that perform probabilistic inference over hierarchies of flexibly structured representations can address some of the deepest questions about the nature and origins of human thought: How does abstract knowledge guide learning and reasoning from sparse data? What forms does our knowledge take, across different domains and tasks? And how is that abstract knowledge itself acquired?},
  keywords    = {2011 magazine mind probability science statistics},
  description = {How to Grow a Mind: Statistics, Structure, and Abstraction | Science},
  added-at    = {2018-08-13T12:47:34.000+0200},
  timestamp   = {2018-08-13T12:47:34.000+0200},
  biburl      = {https://www.bibsonomy.org/bibtex/25f886a5e7600867eef269a2ecb426c36/analyst},
  interhash   = {9fa7d42370ac5845b6b3fc523052bd10},
  intrahash   = {5f886a5e7600867eef269a2ecb426c36},
}