The concept of AI literacy, its promotion, and measurement are important topics as they prepare society for the steadily advancing spread of AI technology. The first purpose of the current study is to advance the measurement of AI literacy by collecting evidence regarding the validity of the Meta AI Literacy Scale (MAILS) by Carolus and colleagues published in 2023: a self-assessment instrument for AI literacy and additional psychological competencies conducive for the use of AI. For this purpose, we first formulated the intended measurement purposes of the MAILS. In a second step, we derived empirically testable axioms and subaxioms from the purposes. We tested them in several already published and newly collected data sets. The results are presented in the form of three different empirical studies. We found overall evidence for the validity of the MAILS with some unexpected findings that require further research. We discuss the results for each study individually and also together. Also, avenues for future research are discussed. The study’s second purpose is to develop a short version (10 items) of the original instrument (34 items). It was possible to find a selection of ten items that represent the factors of the MAILS and show a good model fit when tested with confirmatory factor analysis. Further research will be needed to validate the short scale. This paper advances the knowledge about the validity and provides a short measure for AI literacy. However, more research will be necessary to further our understanding of the relationships between AI literacy and other constructs.
%0 Journal Article
%1 koch2024literacy
%A Koch, Martin J.
%A Carolus, Astrid
%A Wienrich, Carolin
%A Latoschik, Marc Erich
%D 2024
%J Heliyon
%K aiatwork c.wienrich myown
%P 23
%R 10.1016/j.heliyon.2024.e39686
%T Meta AI Literacy Scale: Further validation and development of a short version
%U https://www.cell.com/heliyon/fulltext/S2405-8440(24)15717-9
%X The concept of AI literacy, its promotion, and measurement are important topics as they prepare society for the steadily advancing spread of AI technology. The first purpose of the current study is to advance the measurement of AI literacy by collecting evidence regarding the validity of the Meta AI Literacy Scale (MAILS) by Carolus and colleagues published in 2023: a self-assessment instrument for AI literacy and additional psychological competencies conducive for the use of AI. For this purpose, we first formulated the intended measurement purposes of the MAILS. In a second step, we derived empirically testable axioms and subaxioms from the purposes. We tested them in several already published and newly collected data sets. The results are presented in the form of three different empirical studies. We found overall evidence for the validity of the MAILS with some unexpected findings that require further research. We discuss the results for each study individually and also together. Also, avenues for future research are discussed. The study’s second purpose is to develop a short version (10 items) of the original instrument (34 items). It was possible to find a selection of ten items that represent the factors of the MAILS and show a good model fit when tested with confirmatory factor analysis. Further research will be needed to validate the short scale. This paper advances the knowledge about the validity and provides a short measure for AI literacy. However, more research will be necessary to further our understanding of the relationships between AI literacy and other constructs.
@article{koch2024literacy,
  abstract      = {The concept of AI literacy, its promotion, and measurement are important topics as they prepare society for the steadily advancing spread of AI technology. The first purpose of the current study is to advance the measurement of AI literacy by collecting evidence regarding the validity of the Meta AI Literacy Scale (MAILS) by Carolus and colleagues published in 2023: a self-assessment instrument for AI literacy and additional psychological competencies conducive for the use of AI. For this purpose, we first formulated the intended measurement purposes of the MAILS. In a second step, we derived empirically testable axioms and subaxioms from the purposes. We tested them in several already published and newly collected data sets. The results are presented in the form of three different empirical studies. We found overall evidence for the validity of the MAILS with some unexpected findings that require further research. We discuss the results for each study individually and also together. Also, avenues for future research are discussed. The study’s second purpose is to develop a short version (10 items) of the original instrument (34 items). It was possible to find a selection of ten items that represent the factors of the MAILS and show a good model fit when tested with confirmatory factor analysis. Further research will be needed to validate the short scale. This paper advances the knowledge about the validity and provides a short measure for AI literacy. However, more research will be necessary to further our understanding of the relationships between AI literacy and other constructs.},
  added-at      = {2024-10-23T12:23:13.000+0200},
  author        = {Koch, Martin J. and Carolus, Astrid and Wienrich, Carolin and Latoschik, Marc Erich},
  biburl        = {https://www.bibsonomy.org/bibtex/297eda757c2e80b15e35a36b3177df6c1/hci-uwb},
  doi           = {10.1016/j.heliyon.2024.e39686},
  interhash     = {373443b0558e01778ff255985d39ddff},
  intrahash     = {97eda757c2e80b15e35a36b3177df6c1},
  issn          = {2405-8440},
  journal       = {Heliyon},
  keywords      = {aiatwork c.wienrich myown},
  pages         = {23},
  internal-note = {NOTE(review): pages=23 looks like a page count, not a page range or article number; the elocation id appears to be e39686 (cf. doi/url) -- verify against the published record and consider eid/pages = {e39686}},
  timestamp     = {2024-10-23T12:23:13.000+0200},
  title         = {{Meta AI Literacy Scale}: Further validation and development of a short version},
  url           = {https://www.cell.com/heliyon/fulltext/S2405-8440(24)15717-9},
  year          = {2024},
}