As automated tools for grading programming assignments become more widely used, it is imperative that we better understand how students are utilizing them. Other researchers have provided helpful data on the role automated assessment tools (AATs) have played in the classroom. In order to investigate improved practices in using AATs for student learning, we sought to better understand how students iteratively modify their programs toward a solution by analyzing more than 45,000 student submissions over 7 semesters in an introductory (CS1) programming course. The resulting metrics allowed us to study what steps students took toward solutions for programming assignments. This paper considers the incremental changes students make and the correlating score between sequential submissions, measured by metrics including source lines of code, cyclomatic (McCabe) complexity, state space, and the 6 Halstead measures of complexity of the program. We demonstrate the value of throttling and show that generating software metrics for analysis can serve to help instructors better guide student learning.
Description
An Empirical Study of Iterative Improvement in Programming Assignments
%0 Conference Paper
%1 Pettit:2015:ESI:2676723.2677279
%A Pettit, Raymond
%A Homer, John
%A Gee, Roger
%A Mengel, Susan
%A Starbuck, Adam
%B Proceedings of the 46th ACM Technical Symposium on Computer Science Education
%C New York, NY, USA
%D 2015
%I ACM
%K automatic-assessment programming
%P 410--415
%R 10.1145/2676723.2677279
%T An Empirical Study of Iterative Improvement in Programming Assignments
%U https://doi.org/10.1145/2676723.2677279
%X As automated tools for grading programming assignments become more widely used, it is imperative that we better understand how students are utilizing them. Other researchers have provided helpful data on the role automated assessment tools (AATs) have played in the classroom. In order to investigate improved practices in using AATs for student learning, we sought to better understand how students iteratively modify their programs toward a solution by analyzing more than 45,000 student submissions over 7 semesters in an introductory (CS1) programming course. The resulting metrics allowed us to study what steps students took toward solutions for programming assignments. This paper considers the incremental changes students make and the correlating score between sequential submissions, measured by metrics including source lines of code, cyclomatic (McCabe) complexity, state space, and the 6 Halstead measures of complexity of the program. We demonstrate the value of throttling and show that generating software metrics for analysis can serve to help instructors better guide student learning.
%@ 978-1-4503-2966-8
@inproceedings{Pettit:2015:ESI:2676723.2677279,
  abstract    = {As automated tools for grading programming assignments become more widely used, it is imperative that we better understand how students are utilizing them. Other researchers have provided helpful data on the role automated assessment tools (AATs) have played in the classroom. In order to investigate improved practices in using AATs for student learning, we sought to better understand how students iteratively modify their programs toward a solution by analyzing more than 45,000 student submissions over 7 semesters in an introductory (CS1) programming course. The resulting metrics allowed us to study what steps students took toward solutions for programming assignments. This paper considers the incremental changes students make and the correlating score between sequential submissions, measured by metrics including source lines of code, cyclomatic (McCabe) complexity, state space, and the 6 Halstead measures of complexity of the program. We demonstrate the value of throttling and show that generating software metrics for analysis can serve to help instructors better guide student learning.},
  acmid       = {2677279},
  added-at    = {2018-08-14T10:50:09.000+0200},
  address     = {New York, NY, USA},
  author      = {Pettit, Raymond and Homer, John and Gee, Roger and Mengel, Susan and Starbuck, Adam},
  biburl      = {https://www.bibsonomy.org/bibtex/2bc535921f84ce7ac32d208ce64ea66ed/brusilovsky},
  booktitle   = {Proceedings of the 46th ACM Technical Symposium on Computer Science Education},
  description = {An Empirical Study of Iterative Improvement in Programming Assignments},
  doi         = {10.1145/2676723.2677279},
  interhash   = {e69f39d7890093603ee9c76a8873d5b0},
  intrahash   = {bc535921f84ce7ac32d208ce64ea66ed},
  isbn        = {978-1-4503-2966-8},
  keywords    = {automatic-assessment programming},
  location    = {Kansas City, Missouri, USA},
  numpages    = {6},
  pages       = {410--415},
  publisher   = {ACM},
  series      = {SIGCSE '15},
  timestamp   = {2018-08-14T10:50:09.000+0200},
  title       = {An Empirical Study of Iterative Improvement in Programming Assignments},
  url         = {https://doi.org/10.1145/2676723.2677279},
  year        = 2015
}