
Researcher
Mark Whiting
Senior Computational Social Scientist
Mark Whiting is a Senior Computational Social Scientist at the CSSLab, affiliated with the Department of Computer & Information Science at Penn Engineering and the Department of Operations, Information and Decisions at Wharton. He builds systems to study how people behave and coordinate at scale. At the Lab, he leads the High-Throughput Experiments on Group Dynamics, COVID-Philadelphia, and Common Sense projects.
Mark was previously a postdoctoral researcher with Michael S. Bernstein in the HCI group in Computer Science at Stanford. He holds bachelor’s and master’s degrees in Industrial Design from RMIT and KAIST, respectively, and a PhD in Mechanical Engineering from CMU.
Publications
Almaatouq, Abdullah; Griffiths, Thomas L.; Suchow, Jordan W.; Whiting, Mark E.; Evans, James; Watts, Duncan J.
Beyond Playing 20 Questions with Nature: Integrative Experiment Design in the Social and Behavioral Sciences (Journal Article)
In: Behavioral and Brain Sciences, 2022.
DOI: 10.1017/S0140525X22002874
URL: https://www.cambridge.org/core/journals/behavioral-and-brain-sciences/article/abs/beyond-playing-20-questions-with-nature-integrative-experiment-design-in-the-social-and-behavioral-sciences/7E0D34D5AE2EFB9C0902414C23E0C292#article
Abstract: The dominant paradigm of experiments in the social and behavioral sciences views an experiment as a test of a theory, where the theory is assumed to generalize beyond the experiment's specific conditions. According to this view, which Alan Newell once characterized as “playing twenty questions with nature,” theory is advanced one experiment at a time, and the integration of disparate findings is assumed to happen via the scientific publishing process. In this article, we argue that the process of integration is at best inefficient, and at worst it does not, in fact, occur. We further show that the challenge of integration cannot be adequately addressed by recently proposed reforms that focus on the reliability and replicability of individual findings, nor simply by conducting more or larger experiments. Rather, the problem arises from the imprecise nature of social and behavioral theories and, consequently, a lack of commensurability across experiments conducted under different conditions. Therefore, researchers must fundamentally rethink how they design experiments and how the experiments relate to theory. We specifically describe an alternative framework, integrative experiment design, which intrinsically promotes commensurability and continuous integration of knowledge. In this paradigm, researchers explicitly map the design space of possible experiments associated with a given research question, embracing many potentially relevant theories rather than focusing on just one. The researchers then iteratively generate theories and test them with experiments explicitly sampled from the design space, allowing results to be integrated across experiments. Given recent methodological and technological developments, we conclude that this approach is feasible and would generate more-reliable, more-cumulative empirical and theoretical knowledge than the current paradigm—and with far greater efficiency.
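The core move described in this abstract, mapping a design space of experiments and sampling conditions from it rather than hand-picking a single study, can be illustrated with a small sketch. The Python below is not from the paper; it is a minimal, hypothetical illustration in which the factor names, factor levels, and sample size are all invented for the example.

# Hypothetical sketch of the design-space idea behind integrative experiment
# design: enumerate a space of experimental conditions, then sample conditions
# to run so that results stay commensurable across the sampled region.
import itertools
import random

# Invented example factors; a real design space would be built from the
# dimensions along which prior theories and experiments actually differ.
design_space = {
    "group_size": [2, 4, 8, 16],
    "incentive": ["flat", "performance"],
    "communication": ["none", "chat"],
    "task_difficulty": ["low", "medium", "high"],
}

# Every combination of factor levels is one point (condition) in the space.
all_conditions = [
    dict(zip(design_space, levels))
    for levels in itertools.product(*design_space.values())
]

# Sample conditions from the space instead of committing to a single one;
# each sampled condition would then be run as an experiment and its results
# integrated with the others.
random.seed(0)
sampled_conditions = random.sample(all_conditions, k=10)
for condition in sampled_conditions:
    print(condition)

The sketch uses simple random sampling for brevity; the paper's broader point is that any explicit sampling scheme over an explicit design space keeps experiments comparable in a way that one-off studies are not.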
Almaatouq, Abdullah; Becker, Joshua; Bernstein, Michael S.; Botto, Robert; Bradlow, Eric T.; Damer, Ekaterina; Duckworth, Angela; Griffiths, Tom; Hartshorne, Joshua K.; Lazer, David; Law, Edith; Liu, Min; Matias, J. Nathan; Rand, David; Salganik, Matthew; Satlof-Bedrick, Emma; Schweitzer, Maurice; Shirado, Hirokazu; Suchow, Jordan W.; Suri, Siddharth; Tsvetkova, Milena; Watts, Duncan J.; Whiting, Mark E.; Yin, Ming
Scaling up experimental social, behavioral, and economic science (Technical Report)
2021, 40 pages.
DOI: 10.17605/OSF.IO/KNVJS
URL: https://drive.google.com/file/d/1-4kcD8yn4dTikxrbbm5oaGnaEvj3XQ5B/view?usp=sharing
Abstract: The standard experimental paradigm in the social, behavioral, and economic sciences is extremely limited. Although recent advances in digital technologies and crowdsourcing services allow individual experiments to be deployed and run faster than in traditional physical labs, a majority of experiments still focus on one-off results that do not generalize easily to real-world contexts or even to other variations of the same experiment. As a result, there exist few universally acknowledged findings, and even those are occasionally overturned by new data. We argue that to achieve replicable, generalizable, scalable and ultimately useful social and behavioral science, a fundamental rethinking of the model of virtual-laboratory style experiments is required. Not only is it possible to design and run experiments that are radically different in scale and scope than was possible in an era of physical labs; this ability allows us to ask fundamentally different types of questions than have been asked historically of lab studies. We argue, however, that taking full advantage of this new and exciting potential will require four major changes to the infrastructure, methodology, and culture of experimental science: (1) significant investments in software design and participant recruitment, (2) innovations in experimental design and analysis of experimental data, (3) adoption of new models of collaboration, and (4) a new understanding of the nature and role of theory in experimental social and behavioral science. We conclude that the path we outline, although ambitious, is well within the power of current technology and has the potential to facilitate a new class of scientific advances in social, behavioral and economic studies.
Hu, Xinlan Emily; Whiting, Mark; Bernstein, Michael
Can Online Juries Make Consistent, Repeatable Decisions? (Conference Paper)
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, pp. 1-16, 2021.
DOI: 10.1145/3411764.3445433
URL: https://www.researchgate.net/profile/Mark-Whiting-2/publication/351417423_Can_Online_Juries_Make_Consistent_Repeatable_Decisions/links/60a63ff7a6fdcc731d3ecc47/Can-Online-Juries-Make-Consistent-Repeatable-Decisions.pdf
Abstract: A jury of one’s peers is a prominent way to adjudicate disputes and is increasingly used in participatory governance online. The fairness of this approach rests on the assumption that juries are consistent: that the same jury would hand down similar judgments to similar cases. However, prior literature suggests that social influence would instead cause early interactions to cascade into different judgments for similar cases. In this paper, we report an online experiment that changes participants’ pseudonyms as they appear to collaborators, temporarily masking a jury’s awareness that they have deliberated together before. This technique allows us to measure consistency by reconvening the same jury on similar cases. Counter to expectation, juries are equally consistent as individuals, a result that is “good for democracy.” But this consistency arises in part due to group polarization, as consensus develops by hardening initial majority opinions. Furthermore, we find that aggregating groups’ perspectives without deliberation erodes consistency.
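The consistency comparison described in this abstract can be sketched informally. The Python below is not the paper's analysis pipeline; it is a hypothetical illustration of treating consistency as the rate at which a decision-maker (a reconvened jury or an individual) returns the same verdict on a similar case, with the verdict data invented for the example.

# Hypothetical sketch: consistency as the fraction of matching verdicts when
# the same jury (or individual) judges a similar case a second time.

# Each tuple pairs a first verdict with the verdict on the reconvened,
# similar case. These example data are invented for illustration only.
jury_verdict_pairs = [
    ("guilty", "guilty"),
    ("not guilty", "not guilty"),
    ("guilty", "not guilty"),
    ("guilty", "guilty"),
]
individual_verdict_pairs = [
    ("guilty", "guilty"),
    ("not guilty", "guilty"),
    ("not guilty", "not guilty"),
    ("guilty", "guilty"),
]

def consistency(pairs):
    """Fraction of case pairs where the first and repeat verdicts agree."""
    return sum(first == second for first, second in pairs) / len(pairs)

print(f"jury consistency:       {consistency(jury_verdict_pairs):.2f}")
print(f"individual consistency: {consistency(individual_verdict_pairs):.2f}")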
Gamage, Dilrukshi; Staubitz, Thomas; Whiting, Mark
Peer assessment in MOOCs: Systematic literature review (Journal Article)
In: Distance Education, pp. 1-22, 2021.
DOI: 10.1080/01587919.2021.1911626
URL: https://www.researchgate.net/profile/Dilrukshi-Gamage-2/publication/351344733_Peer_assessment_in_MOOCs_Systematic_literature_review/links/60923fc3a6fdccaebd093ccc/Peer-assessment-in-MOOCs-Systematic-literature-review.pdf
Abstract: We report on a systematic review of the landscape of peer assessment in massive open online courses (MOOCs) with papers from 2014 to 2020 in 20 leading education technology publication venues across four databases containing education technology–related papers, addressing three research issues: the evolution of peer assessment in MOOCs during the period 2014 to 2020, the methods used in MOOCs to assess peers, and the challenges of and future directions in MOOC peer assessment. We provide summary statistics and a review of methods across the corpus and highlight three directions for improving the use of peer assessment in MOOCs: the need for focusing on scaling learning through peer evaluations, the need for scaling and optimizing team submissions in team peer assessments, and the need for embedding a social process for peer assessment.