Publications
2024
Barreras, Francisco (Paco); Watts, Duncan J.
The exciting potential and daunting challenge of using GPS human-mobility data for epidemic modeling Journal Article
In: Nature Computational Science, vol. 4, pp. 398-411, 2024.
@article{barreras2024gps,
  title     = {The exciting potential and daunting challenge of using {GPS} human-mobility data for epidemic modeling},
  author    = {Barreras, Francisco and Watts, Duncan J.},
  url       = {https://www.nature.com/articles/s43588-024-00637-0},
  doi       = {10.1038/s43588-024-00637-0},
  year      = {2024},
  date      = {2024-06-19},
  journal   = {Nature Computational Science},
  volume    = {4},
  pages     = {398--411},
  abstract  = {Large-scale GPS location datasets hold immense potential for measuring human mobility and interpersonal contact, both of which are essential for data-driven epidemiology. However, despite their potential and widespread adoption during the COVID-19 pandemic, there are several challenges with these data that raise concerns regarding the validity and robustness of its applications. Here we outline two types of challenges—some related to accessing and processing these data, and some related to data quality—and propose several research directions to address them moving forward.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Bryan; Haider, Samar; Callison-Burch, Chris
This Land is {Your, My} Land: Evaluating Geopolitical Biases in Language Models Proceedings Article
In: Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 3855-3871, 2024.
@inproceedings{li2024land,
  title     = {This Land is {Your, My} Land: Evaluating Geopolitical Biases in Language Models},
  author    = {Li, Bryan and Haider, Samar and Callison-Burch, Chris},
  url       = {https://aclanthology.org/2024.naacl-long.213},
  year      = {2024},
  date      = {2024-06-16},
  booktitle = {Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  publisher = {Association for Computational Linguistics},
  volume    = {1},
  pages     = {3855--3871},
  abstract  = {Do the Spratly Islands belong to China, the Philippines, or Vietnam? A pretrained large language model (LLM) may answer differently if asked in the languages of each claimant country: Chinese, Tagalog, or Vietnamese. This contrasts with a multilingual human, who would likely answer consistently. In this paper, we show that LLMs recall certain geographical knowledge inconsistently when queried in different languages—a phenomenon we term geopolitical bias. As a targeted case study, we consider territorial disputes, an inherently controversial and multilingual task. We introduce BorderLines, a dataset of territorial disputes which covers 251 territories, each associated with a set of multiple-choice questions in the languages of each claimant country (49 languages in total). We also propose a suite of evaluation metrics to precisely quantify bias and consistency in responses across different languages. We then evaluate various multilingual LLMs on our dataset and metrics to probe their internal knowledge and use the proposed metrics to discover numerous inconsistencies in how these models respond in different languages. Finally, we explore several prompt modification strategies, aiming to either amplify or mitigate geopolitical bias, which highlights how brittle LLMs are and how they tailor their responses depending on cues from the interaction context. Our code and data are available at https://github.com/manestay/borderlines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Budak, Ceren; Nyhan, Brendan; Rothschild, David M.; Thorson, Emily; Watts, Duncan J.
Misunderstanding the harms of online misinformation Journal Article
In: Nature, vol. 630, pp. 45-53, 2024.
@article{budak2024misunderstanding,
  title     = {Misunderstanding the harms of online misinformation},
  author    = {Budak, Ceren and Nyhan, Brendan and Rothschild, David M. and Thorson, Emily and Watts, Duncan J.},
  url       = {https://www.nature.com/articles/s41586-024-07417-w},
  doi       = {10.1038/s41586-024-07417-w},
  year      = {2024},
  date      = {2024-06-05},
  urldate   = {2024-06-05},
  journal   = {Nature},
  volume    = {630},
  pages     = {45--53},
  abstract  = {The controversy over online misinformation and social media has opened a gap between public discourse and scientific research. Public intellectuals and journalists frequently make sweeping claims about the effects of exposure to false content online that are inconsistent with much of the current empirical evidence. Here we identify three common misperceptions: that average exposure to problematic content is high, that algorithms are largely responsible for this exposure and that social media is a primary cause of broader social problems such as polarization. In our review of behavioural science research on online misinformation, we document a pattern of low exposure to false and inflammatory content that is concentrated among a narrow fringe with strong motivations to seek out such information. In response, we recommend holding platforms accountable for facilitating exposure to false and extreme content in the tails of the distribution, where consumption is highest and the risk of real-world harm is greatest. We also call for increased platform transparency, including collaborations with outside researchers, to better evaluate the effects of online misinformation and the most effective responses to it. Taking these steps is especially important outside the USA and Western Europe, where research and data are scant and harms may be more severe.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Allen, Jennifer; Watts, Duncan J.; Rand, David G.
Quantifying the impact of misinformation and vaccine-skeptical content on Facebook Journal Article
In: Science, vol. 384, iss. 6699, 2024.
@article{allen2024quantifying,
  title     = {Quantifying the impact of misinformation and vaccine-skeptical content on {Facebook}},
  author    = {Allen, Jennifer and Watts, Duncan J. and Rand, David G.},
  url       = {https://www.science.org/doi/10.1126/science.adk3451},
  doi       = {10.1126/science.adk3451},
  year      = {2024},
  date      = {2024-05-31},
  journal   = {Science},
  volume    = {384},
  number    = {6699},
  abstract  = {Low uptake of the COVID-19 vaccine in the US has been widely attributed to social media misinformation. To evaluate this claim, we introduce a framework combining lab experiments (total N = 18,725), crowdsourcing, and machine learning to estimate the causal effect of 13,206 vaccine-related URLs on the vaccination intentions of US Facebook users (N ≈ 233 million). We estimate that the impact of unflagged content that nonetheless encouraged vaccine skepticism was 46-fold greater than that of misinformation flagged by fact-checkers. Although misinformation reduced predicted vaccination intentions significantly more than unflagged vaccine content when viewed, Facebook users’ exposure to flagged content was limited. In contrast, unflagged stories highlighting rare deaths after vaccination were among Facebook’s most-viewed stories. Our work emphasizes the need to scrutinize factually accurate but potentially misleading content in addition to outright falsehoods.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hosseinmardi, Homa; Ghasemian, Amir; Rivera-Lanas, Miguel; Horta Ribeiro, Manoel; West, Robert; Watts, Duncan J.
Causally estimating the effect of YouTube’s recommender system using counterfactual bots Journal Article
In: PNAS, vol. 121, iss. 8, 2024.
@article{hosseinmardi2024causally,
  title     = {Causally estimating the effect of {YouTube}’s recommender system using counterfactual bots},
  author    = {Hosseinmardi, Homa and Ghasemian, Amir and Rivera-Lanas, Miguel and Horta Ribeiro, Manoel and West, Robert and Watts, Duncan J.},
  doi       = {10.1073/pnas.2313377121},
  year      = {2024},
  date      = {2024-02-13},
  urldate   = {2024-02-13},
  journal   = {PNAS},
  volume    = {121},
  number    = {8},
  abstract  = {In recent years, critics of online platforms have raised concerns about the ability of recommendation algorithms to amplify problematic content, with potentially radicalizing consequences. However, attempts to evaluate the effect of recommenders have suffered from a lack of appropriate counterfactuals—what a user would have viewed in the absence of algorithmic recommendations—and hence cannot disentangle the effects of the algorithm from a user’s intentions. Here we propose a method that we call “counterfactual bots” to causally estimate the role of algorithmic recommendations on the consumption of highly partisan content on YouTube. By comparing bots that replicate real users’ consumption patterns with “counterfactual” bots that follow rule-based trajectories, we show that, on average, relying exclusively on the YouTube recommender results in less partisan consumption, where the effect is most pronounced for heavy partisan consumers. Following a similar method, we also show that if partisan consumers switch to moderate content, YouTube’s sidebar recommender “forgets” their partisan preference within roughly 30 videos regardless of their prior history, while homepage recommendations shift more gradually toward moderate content. Overall, our findings indicate that, at least since the algorithm changes that YouTube implemented in 2019, individual consumption patterns mostly reflect individual preferences, where algorithmic recommendations play, if anything, a moderating role.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Whiting, Mark E.; Watts, Duncan J.
A framework for quantifying individual and collective common sense Journal Article
In: PNAS, vol. 121, iss. 4, 2024.
@article{whiting2024framework,
  title     = {A framework for quantifying individual and collective common sense},
  author    = {Whiting, Mark E. and Watts, Duncan J.},
  doi       = {10.1073/pnas.2309535121},
  year      = {2024},
  date      = {2024-01-16},
  urldate   = {2024-01-16},
  journal   = {PNAS},
  volume    = {121},
  number    = {4},
  abstract  = {The notion of common sense is invoked so frequently in contexts as diverse as everyday conversation, political debates, and evaluations of artificial intelligence that its meaning might be surmised to be unproblematic. Surprisingly, however, neither the intrinsic properties of common sense knowledge (what makes a claim commonsensical) nor the degree to which it is shared by people (its “commonness”) have been characterized empirically. In this paper, we introduce an analytical framework for quantifying both these elements of common sense. First, we define the commonsensicality of individual claims and people in terms of the latter’s propensity to agree on the former and their awareness of one another’s agreement. Second, we formalize the commonness of common sense as a clique detection problem on a bipartite belief graph of people and claims, defining pq common sense as the fraction q of claims shared by a fraction p of people. Evaluating our framework on a dataset of 2,046 raters evaluating 4,407 diverse claims, we find that commonsensicality aligns most closely with plainly-worded, fact-like statements about everyday physical reality. Psychometric attributes such as social perceptiveness influence individual common sense, but surprisingly demographic factors such as age or gender do not. Finally, we find that collective common sense is rare: at most a small fraction p of people agree on more than a small fraction q of claims. Together, these results undercut universalistic beliefs about common sense but also open new questions about its variability that are relevant both to human and artificial intelligence.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
@comment{Export artifact: duplicated copy of the abstract from the entry above, retained for reference.
conversation, political debates, and evaluations of artificial intelligence that its meaning might
be surmised to be unproblematic. Surprisingly, however, neither the intrinsic properties of
common sense knowledge (what makes a claim commonsensical) nor the degree to which it
is shared by people (its “commonness”) have been characterized empirically. In this paper,
we introduce an analytical framework for quantifying both these elements of common sense.
First, we define the commonsensicality of individual claims and people in terms of the latter’s
propensity to agree on the former and their awareness of one another’s agreement. Second,
we formalize the commonness of common sense as a clique detection problem on a bipartite
belief graph of people and claims, defining pq common sense as the fraction q of claims shared
by a fraction p of people. Evaluating our framework on a dataset of 2,046 raters evaluating
4,407 diverse claims, we find that commonsensicality aligns most closely with plainly-worded,
fact-like statements about everyday physical reality. Psychometric attributes such as social
perceptiveness influence individual common sense, but surprisingly demographic factors
such as age or gender do not. Finally, we find that collective common sense is rare: at most a
small fraction p of people agree on more than a small fraction q of claims. Together, these
results undercut universalistic beliefs about common sense but also open new questions
about its variability that are relevant both to human and artificial intelligence.
}
2023
Almaatouq, Abdullah; Alsobay, Mohammed; Yin, Ming; Watts, Duncan J.
The Effects of Group Composition and Dynamics on Collective Performance Journal Article
In: Topics in Cognitive Science, 2023.
@article{almaatouq2023effects,
  title     = {The Effects of Group Composition and Dynamics on Collective Performance},
  author    = {Almaatouq, Abdullah and Alsobay, Mohammed and Yin, Ming and Watts, Duncan J.},
  url       = {https://onlinelibrary.wiley.com/doi/full/10.1111/tops.12706},
  doi       = {10.1111/tops.12706},
  year      = {2023},
  date      = {2023-11-05},
  journal   = {Topics in Cognitive Science},
  abstract  = {As organizations gravitate to group-based structures, the problem of improving performance through judicious selection of group members has preoccupied scientists and managers alike. However, which individual attributes best predict group performance remains poorly understood. Here, we describe a preregistered experiment in which we simultaneously manipulated four widely studied attributes of group compositions: skill level, skill diversity, social perceptiveness, and cognitive style diversity. We find that while the average skill level of group members, skill diversity, and social perceptiveness are significant predictors of group performance, skill level dominates all other factors combined. Additionally, we explore the relationship between patterns of collaborative behavior and performance outcomes and find that any potential gains in solution quality from additional communication between the group members are outweighed by the overhead time cost, leading to lower overall efficiency. However, groups exhibiting more “turn-taking” behavior are considerably faster and thus more efficient. Finally, contrary to our expectation, we find that group compositional factors (i.e., skill level and social perceptiveness) are not associated with the amount of communication between group members nor turn-taking dynamics.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ribeiro, Manoel Horta; Hosseinmardi, Homa; West, Robert; Watts, Duncan J.
Deplatforming did not decrease Parler users’ activity on fringe social media Journal Article
In: PNAS Nexus, vol. 2, iss. 3, 2023.
@article{ribeiro2023deplatforming,
  title     = {Deplatforming did not decrease {Parler} users’ activity on fringe social media},
  author    = {Ribeiro, Manoel Horta and Hosseinmardi, Homa and West, Robert and Watts, Duncan J.},
  url       = {https://academic.oup.com/pnasnexus/article/2/3/pgad035/7081430},
  doi       = {10.1093/pnasnexus/pgad035},
  year      = {2023},
  date      = {2023-03-21},
  journal   = {PNAS Nexus},
  volume    = {2},
  number    = {3},
  abstract  = {Online platforms have banned (“deplatformed”) influencers, communities, and even entire websites to reduce content deemed harmful. Deplatformed users often migrate to alternative platforms, which raises concerns about the effectiveness of deplatforming. Here, we study the deplatforming of Parler, a fringe social media platform, between 2020 January 11 and 2021 February 25, in the aftermath of the US Capitol riot. Using two large panels that capture longitudinal user-level activity across mainstream and fringe social media content (N = 112, 705, adjusted to be representative of US desktop and mobile users), we find that other fringe social media, such as Gab and Rumble, prospered after Parler’s deplatforming. Further, the overall activity on fringe social media increased while Parler was offline. Using a difference-in-differences analysis (N = 996), we then identify the causal effect of deplatforming on active Parler users, finding that deplatforming increased the probability of daily activity across other fringe social media in early 2021 by 10.9 percentage points (pp) (95% CI [5.9 pp, 15.9 pp]) on desktop devices, and by 15.9 pp (95% CI [10.2 pp, 21.7 pp]) on mobile devices, without decreasing activity on fringe social media in general (including Parler). Our results indicate that the isolated deplatforming of a major fringe platform was ineffective at reducing overall user activity on fringe social media.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2022
Almaatouq, Abdullah; Griffiths, Thomas L.; Suchow, Jordan W.; Whiting, Mark E.; Evans, James; Watts, Duncan J.
Beyond Playing 20 Questions with Nature: Integrative Experiment Design in the Social and Behavioral Sciences Journal Article
In: Behavioral and Brain Sciences, 2022.
@article{almaatouq2022beyond,
  title     = {Beyond Playing 20 Questions with Nature: Integrative Experiment Design in the Social and Behavioral Sciences},
  author    = {Almaatouq, Abdullah and Griffiths, Thomas L. and Suchow, Jordan W. and Whiting, Mark E. and Evans, James and Watts, Duncan J.},
  url       = {https://www.cambridge.org/core/journals/behavioral-and-brain-sciences/article/abs/beyond-playing-20-questions-with-nature-integrative-experiment-design-in-the-social-and-behavioral-sciences/7E0D34D5AE2EFB9C0902414C23E0C292},
  doi       = {10.1017/S0140525X22002874},
  year      = {2022},
  date      = {2022-12-21},
  urldate   = {2022-12-21},
  journal   = {Behavioral and Brain Sciences},
  abstract  = {The dominant paradigm of experiments in the social and behavioral sciences views an experiment as a test of a theory, where the theory is assumed to generalize beyond the experiment's specific conditions. According to this view, which Alan Newell once characterized as “playing twenty questions with nature,” theory is advanced one experiment at a time, and the integration of disparate findings is assumed to happen via the scientific publishing process. In this article, we argue that the process of integration is at best inefficient, and at worst it does not, in fact, occur. We further show that the challenge of integration cannot be adequately addressed by recently proposed reforms that focus on the reliability and replicability of individual findings, nor simply by conducting more or larger experiments. Rather, the problem arises from the imprecise nature of social and behavioral theories and, consequently, a lack of commensurability across experiments conducted under different conditions. Therefore, researchers must fundamentally rethink how they design experiments and how the experiments relate to theory. We specifically describe an alternative framework, integrative experiment design, which intrinsically promotes commensurability and continuous integration of knowledge. In this paradigm, researchers explicitly map the design space of possible experiments associated with a given research question, embracing many potentially relevant theories rather than focusing on just one. The researchers then iteratively generate theories and test them with experiments explicitly sampled from the design space, allowing results to be integrated across experiments. Given recent methodological and technological developments, we conclude that this approach is feasible and would generate more-reliable, more-cumulative empirical and theoretical knowledge than the current paradigm—and with far greater efficiency.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Milkman, Katherine L.; Gandhi, Linnea; Ellis, Sean F.; Graci, Heather N.; Gromet, Dena M.; Mobarak, Rayyan S.; Buttenheim, Alison M.; Duckworth, Angela L.; Pope, Devin; Stanford, Ala; Thaler, Richard; Volpp, Kevin G.
A citywide experiment testing the impact of geographically targeted, high-pay-off vaccine lotteries Journal Article
In: Nature Human Behaviour, 2022, ISSN: 2397-3374.
@article{milkman2022citywide,
  title     = {A citywide experiment testing the impact of geographically targeted, high-pay-off vaccine lotteries},
  author    = {Milkman, Katherine L. and Gandhi, Linnea and Ellis, Sean F. and Graci, Heather N. and Gromet, Dena M. and Mobarak, Rayyan S. and Buttenheim, Alison M. and Duckworth, Angela L. and Pope, Devin and Stanford, Ala and Thaler, Richard and Volpp, Kevin G.},
  url       = {https://www.nature.com/articles/s41562-022-01437-0},
  doi       = {10.1038/s41562-022-01437-0},
  issn      = {2397-3374},
  year      = {2022},
  date      = {2022-09-01},
  journal   = {Nature Human Behaviour},
  abstract  = {Lotteries have been shown to motivate behaviour change in many settings, but their value as a policy tool is relatively untested. We implemented a pre-registered, citywide experiment to test the effects of three high-pay-off, geographically targeted lotteries designed to motivate adult Philadelphians to get their COVID-19 vaccine. In each drawing, the residents of a randomly selected ‘treatment’ zip code received half the lottery prizes, boosting their chances of winning to 50×–100× those of other Philadelphians. The first treated zip code, which drew considerable media attention, may have experienced a small bump in vaccinations compared with the control zip codes: average weekly vaccinations rose by an estimated 61 per 100,000 people per week (+11%). After pooling the results from all three zip codes treated during our six-week experiment, however, we do not detect evidence of any overall benefits. Furthermore, our 95% confidence interval provides a 9% upper bound on the net benefits of treatment in our study.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Muise, Daniel; Hosseinmardi, Homa; Howland, Baird; Mobius, Markus; Rothschild, David; Watts, Duncan J.
Quantifying partisan news diets in Web and TV audiences Journal Article
In: Science Advances, vol. 8, iss. 28, 2022.
@article{muise2022quantifying,
  title     = {Quantifying partisan news diets in Web and {TV} audiences},
  author    = {Muise, Daniel and Hosseinmardi, Homa and Howland, Baird and Mobius, Markus and Rothschild, David and Watts, Duncan J.},
  url       = {https://www.science.org/doi/10.1126/sciadv.abn0083},
  doi       = {10.1126/sciadv.abn0083},
  year      = {2022},
  date      = {2022-07-13},
  urldate   = {2022-07-13},
  journal   = {Science Advances},
  volume    = {8},
  number    = {28},
  abstract  = {Partisan segregation within the news audience buffers many Americans from countervailing political views, posing a risk to democracy. Empirical studies of the online media ecosystem suggest that only a small minority of Americans, driven by a mix of demand and algorithms, are siloed according to their political ideology. However, such research omits the comparatively larger television audience and often ignores temporal dynamics underlying news consumption. By analyzing billions of browsing and viewing events between 2016 and 2019, with a novel framework for measuring partisan audiences, we first estimate that 17% of Americans are partisan-segregated through television versus roughly 4% online. Second, television news consumers are several times more likely to maintain their partisan news diets month-over-month. Third, TV viewers’ news diets are far more concentrated on preferred sources. Last, partisan news channels’ audiences are growing even as the TV news audience is shrinking. Our results suggest that television is the top driver of partisan audience segregation among Americans.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2021
Balietti, Stefano; Getoor, Lise; Goldstein, Daniel G.; Watts, Duncan J.
Reducing opinion polarization: Effects of exposure to similar people with differing political views Journal Article
In: Proceedings of the National Academy of Sciences, vol. 118, no. 52, 2021.
@article{balietti2021reducing,
  title     = {Reducing opinion polarization: Effects of exposure to similar people with differing political views},
  author    = {Balietti, Stefano and Getoor, Lise and Goldstein, Daniel G. and Watts, Duncan J.},
  editor    = {Jackson, Matthew},
  url       = {https://www.pnas.org/content/118/52/e2112552118.short},
  doi       = {10.1073/pnas.2112552118},
  year      = {2021},
  date      = {2021-12-28},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {118},
  number    = {52},
  abstract  = {In a large-scale, preregistered experiment on informal political communication, we algorithmically matched participants, varying two dimensions: 1) the degree of incidental similarity on nonpolitical features; and 2) their stance agreement on a contentious political topic. Matched participants were first shown a computer-generated social media profile of their match highlighting all the shared nonpolitical features; then, they read a short, personal, but argumentative, essay written by their match about the reduction of inequality via redistribution of wealth by the government. We show that support for redistribution increased and polarization decreased for participants with both mild and strong views, regardless of their political leaning. We further show that feeling close to the match is associated with an 86% increase in the probability of assimilation of political views. Our analysis also uncovers an asymmetry: Interacting with someone with opposite views greatly reduced feelings of closeness; however, interacting with someone with consistent views only moderately increased them. By extending previous work about the effects of incidental similarity and shared identity on affect into the domain of political opinion change, our results bear real-world implications for the (re)-design of social media platforms. Because many people prefer to keep politics outside of their social networks, encouraging cross-cutting political communication based on nonpolitical commonalities is a potential solution for fostering consensus on potentially divisive and partisan topics.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lifchits, George; Anderson, Ashton; Goldstein, Daniel G.; Hofman, Jake M.; Watts, Duncan J.
Success stories cause false beliefs about success Journal Article
In: Judgment and Decision Making, vol. 16, no. 6, pp. 1439-1463, 2021, ISSN: 1930-2975.
@article{lifchits2021success,
  title     = {Success stories cause false beliefs about success},
  author    = {Lifchits, George and Anderson, Ashton and Goldstein, Daniel G. and Hofman, Jake M. and Watts, Duncan J.},
  url       = {http://journal.sjdm.org/21/210225/jdm210225.pdf},
  issn      = {1930-2975},
  year      = {2021},
  date      = {2021-11-30},
  journal   = {Judgment and Decision Making},
  volume    = {16},
  number    = {6},
  pages     = {1439--1463},
  abstract  = {Many popular books and articles that purport to explain how people, companies, or ideas succeed highlight a few successes chosen to fit a particular narrative. We investigate what effect these highly selected “success narratives” have on readers’ beliefs and decisions. We conducted a large, randomized, pre-registered experiment, showing participants successful firms with founders that all either dropped out of or graduated college, and asked them to make incentive-compatible bets on a new firm. Despite acknowledging biases in the examples, participants’ decisions were very strongly influenced by them. People shown dropout founders were 55 percentage points more likely to bet on a dropout-founded company than people who were shown graduate founders. Most reported medium to high confidence in their bets, and many wrote causal explanations justifying their decision. In light of recent concerns about false information, our findings demonstrate how true but biased information can strongly alter beliefs and decisions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Almaatouq, Abdullah; Alsobay, Mohammed; Yin, Ming; Watts, Duncan J.
Task complexity moderates group synergy Journal Article
In: Proceedings of the National Academy of Sciences, vol. 118, no. 36, 2021.
@article{almaatouq2021task,
  title     = {Task complexity moderates group synergy},
  author    = {Almaatouq, Abdullah and Alsobay, Mohammed and Yin, Ming and Watts, Duncan J.},
  url       = {https://www.pnas.org/content/118/36/e2101062118},
  doi       = {10.1073/pnas.2101062118},
  year      = {2021},
  date      = {2021-09-07},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {118},
  number    = {36},
  abstract  = {Complexity—defined in terms of the number of components and the nature of the interdependencies between them—is clearly a relevant feature of all tasks that groups perform. Yet the role that task complexity plays in determining group performance remains poorly understood, in part because no clear language exists to express complexity in a way that allows for straightforward comparisons across tasks. Here we avoid this analytical difficulty by identifying a class of tasks for which complexity can be varied systematically while keeping all other elements of the task unchanged. We then test the effects of task complexity in a preregistered two-phase experiment in which 1,200 individuals were evaluated on a series of tasks of varying complexity (phase 1) and then randomly assigned to solve similar tasks either in interacting groups or as independent individuals (phase 2). We find that interacting groups are as fast as the fastest individual and more efficient than the most efficient individual for complex tasks but not for simpler ones. Leveraging our highly granular digital data, we define and precisely measure group process losses and synergistic gains and show that the balance between the two switches signs at intermediate values of task complexity. Finally, we find that interacting groups generate more solutions more rapidly and explore the solution space more broadly than independent problem solvers, finding higher-quality solutions than all but the highest-scoring individuals.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Konitzer, Tobias; Allen, Jennifer; Eckman, Stephanie; Howland, Baird; Mobius, Markus; Rothschild, David; Watts, Duncan J.
Comparing Estimates of News Consumption from Survey and Passively Collected Behavioral Data Journal Article
In: Public Opinion Quarterly, 2021.
@article{konitzer2021comparing,
  title     = {Comparing Estimates of News Consumption from Survey and Passively Collected Behavioral Data},
  author    = {Konitzer, Tobias and Allen, Jennifer and Eckman, Stephanie and Howland, Baird and Mobius, Markus and Rothschild, David and Watts, Duncan J.},
  url       = {https://academic.oup.com/poq/advance-article/doi/10.1093/poq/nfab023/6356135?login=true},
  doi       = {10.1093/poq/nfab023},
  year      = {2021},
  date      = {2021-08-21},
  urldate   = {2021-08-21},
  journal   = {Public Opinion Quarterly},
  abstract  = {Surveys are a vital tool for understanding public opinion and knowledge, but they can also yield biased estimates of behavior. Here we explore a popular and important behavior that is frequently measured in public opinion surveys: news consumption. Previous studies have shown that television news consumption is consistently overreported in surveys relative to passively collected behavioral data. We validate these earlier findings, showing that they continue to hold despite large shifts in news consumption habits over time, while also adding some new nuance regarding question wording. We extend these findings to survey reports of online and social media news consumption, with respect to both levels and trends. Third, we demonstrate the usefulness of passively collected data for measuring a quantity such as “consuming news” for which different researchers might reasonably choose different definitions. Finally, recognizing that passively collected data suffers from its own limitations, we outline a framework for using a mix of passively collected behavioral and survey-generated attitudinal data to accurately estimate consumption of news and related effects on public opinion and knowledge, conditional on media consumption.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hosseinmardi, Homa; Ghasemian, Amir; Clauset, Aaron; Mobius, Markus; Rothschild, David M.; Watts, Duncan J.
Examining the consumption of radical content on YouTube Journal Article
In: Proceedings of the National Academy of Sciences, vol. 118, no. 32, 2021.
@article{hosseinmardi2021examining,
title = {Examining the consumption of radical content on {YouTube}},
author = {Homa Hosseinmardi and Amir Ghasemian and Aaron Clauset and Markus Mobius and David M. Rothschild and Duncan J. Watts},
url = {https://www.pnas.org/content/118/32/e2101967118},
doi = {10.1073/pnas.2101967118},
year = {2021},
date = {2021-08-10},
journal = {Proceedings of the National Academy of Sciences},
volume = {118},
number = {32},
abstract = {Although it is under-studied relative to other social media platforms, YouTube is arguably the largest and most engaging online media consumption platform in the world. Recently, YouTube’s scale has fueled concerns that YouTube users are being radicalized via a combination of biased recommendations and ostensibly apolitical “anti-woke” channels, both of which have been claimed to direct attention to radical political content. Here we test this hypothesis using a representative panel of more than 300,000 Americans and their individual-level browsing behavior, on and off YouTube, from January 2016 through December 2019. Using a labeled set of political news channels, we find that news consumption on YouTube is dominated by mainstream and largely centrist sources. Consumers of far-right content, while more engaged than average, represent a small and stable percentage of news consumers. However, consumption of “anti-woke” content, defined in terms of its opposition to progressive intellectual and political agendas, grew steadily in popularity and is correlated with consumption of far-right content off-platform. We find no evidence that engagement with far-right content is caused by YouTube recommendations systematically, nor do we find clear evidence that anti-woke channels serve as a gateway to the far right. Rather, consumption of political content on YouTube appears to reflect individual preferences that extend across the web as a whole.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Allen, Jennifer; Mobius, Markus; Rothschild, David M.; Watts, Duncan J.
Research note: Examining potential bias in large-scale censored data Journal Article
In: Harvard Kennedy School (HKS) Misinformation Review, 2021.
@article{allen2021research,
  author    = {Jennifer Allen and Markus Mobius and David M. Rothschild and Duncan J. Watts},
  title     = {Research note: Examining potential bias in large-scale censored data},
  journal   = {Harvard Kennedy School (HKS) Misinformation Review},
  year      = {2021},
  date      = {2021-07-26},
  doi       = {10.37016/mr-2020-74},
  url       = {https://misinforeview.hks.harvard.edu/article/research-note-examining-potential-bias-in-large-scale-censored-data/},
  abstract  = {We examine potential bias in Facebook’s 10-trillion cell URLs dataset, consisting of URLs shared on its platform and their engagement metrics. Despite the unprecedented size of the dataset, it was altered to protect user privacy in two ways: 1) by adding differentially private noise to engagement counts, and 2) by censoring the data with a 100-public-share threshold for a URL’s inclusion. To understand how these alterations affect conclusions drawn from the data, we estimate the prevalence of fake news in the massive, censored URLs dataset and compare it to an estimate from a smaller, representative dataset. We show that censoring can substantially alter conclusions that are drawn from the Facebook dataset. Because of this 100-public-share threshold, descriptive statistics from the Facebook URLs dataset overestimate the share of fake news and news overall by as much as 4X. We conclude with more general implications for censoring data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hofman, Jake M.; Watts, Duncan J.; Athey, Susan; Garip, Filiz; Griffiths, Thomas L.; Kleinberg, Jon; Margetts, Helen; Mullainathan, Sendhil; Salganik, Matthew J.; Vazire, Simine; Vespignani, Alessandro; Yarkoni, Tal
Integrating explanation and prediction in computational social science Journal Article
In: Nature, vol. 595, pp. 181-188, 2021.
@article{hofman2021integrating,
title = {Integrating explanation and prediction in computational social science},
author = {Jake M. Hofman and Duncan J. Watts and Susan Athey and Filiz Garip and Thomas L. Griffiths and Jon Kleinberg and Helen Margetts and Sendhil Mullainathan and Matthew J. Salganik and Simine Vazire and Alessandro Vespignani and Tal Yarkoni},
url = {https://drive.google.com/file/d/1jTnZgkBUh13PQSSmFDAv5UU-yGQB9xWV/view?usp=sharing},
doi = {10.1038/s41586-021-03659-0},
year = {2021},
date = {2021-06-30},
journal = {Nature},
volume = {595},
pages = {181--188},
abstract = {Computational social science is more than just large repositories of digital data and the computational methods needed to construct and analyse them. It also represents a convergence of different fields with different ways of thinking about and doing science. The goal of this Perspective is to provide some clarity around how these approaches differ from one another and to propose how they might be productively integrated. Towards this end we make two contributions. The first is a schema for thinking about research activities along two dimensions—the extent to which work is explanatory, focusing on identifying and estimating causal effects, and the degree of consideration given to testing predictions of outcomes—and how these two priorities can complement, rather than compete with, one another. Our second contribution is to advocate that computational social scientists devote more attention to combining prediction and explanation, which we call integrative modelling, and to outline some practical suggestions for realizing this goal.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Almaatouq, Abdullah; Becker, Joshua; Bernstein, Michael S.; Botto, Robert; Bradlow, Eric T.; Damer, Ekaterina; Duckworth, Angela; Griffiths, Tom; Hartshorne, Joshua K.; Lazer, David; Law, Edith; Liu, Min; Matias, J. Nathan; Rand, David; Salganik, Matthew; Satlof-Bedrick, Emma; Schweitzer, Maurice; Shirado, Hirokazu; Suchow, Jordan W.; Suri, Siddharth; Tsvetkova, Milena; Watts, Duncan J.; Whiting, Mark E.; Yin, Ming
Scaling up experimental social, behavioral, and economic science Technical Report
2021.
@techreport{almaatouq2021scaling,
title = {Scaling up experimental social, behavioral, and economic science},
author = {Abdullah Almaatouq and Joshua Becker and Michael S. Bernstein and Robert Botto and Eric T. Bradlow and Ekaterina Damer and Angela Duckworth and Tom Griffiths and Joshua K. Hartshorne and David Lazer and Edith Law and Min Liu and J. Nathan Matias and David Rand and Matthew Salganik and Emma Satlof-Bedrick and Maurice Schweitzer and Hirokazu Shirado and Jordan W. Suchow and Siddharth Suri and Milena Tsvetkova and Duncan J. Watts and Mark E. Whiting and Ming Yin},
url = {https://drive.google.com/file/d/1-4kcD8yn4dTikxrbbm5oaGnaEvj3XQ5B/view?usp=sharing},
doi = {10.17605/OSF.IO/KNVJS},
year = {2021},
date = {2021-06-29},
urldate = {2021-06-29},
pages = {40},
abstract = {The standard experimental paradigm in the social, behavioral, and economic sciences is extremely limited. Although recent advances in digital technologies and crowdsourcing services allow individual experiments to be deployed and run faster than in traditional physical labs, a majority of experiments still focus on one-off results that do not generalize easily to real-world contexts or even to other variations of the same experiment. As a result, there exist few universally acknowledged findings, and even those are occasionally overturned by new data. We argue that to achieve replicable, generalizable, scalable and ultimately useful social and behavioral science, a fundamental rethinking of the model of virtual-laboratory style experiments is required. Not only is it possible to design and run experiments that are radically different in scale and scope than was possible in an era of physical labs; this ability allows us to ask fundamentally different types of questions than have been asked historically of lab studies. We argue, however, that taking full advantage of this new and exciting potential will require four major changes to the infrastructure, methodology, and culture of experimental science: (1) significant investments in software design and participant recruitment, (2) innovations in experimental design and analysis of experimental data, (3) adoption of new models of collaboration, and (4) a new understanding of the nature and role of theory in experimental social and behavioral science. We conclude that the path we outline, although ambitious, is well within the power of current technology and has the potential to facilitate a new class of scientific advances in social, behavioral and economic studies.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Barreras, Francisco; Hayhoe, Mikhail; Hassani, Hamed; Preciado, Victor M.
AutoEKF: Scalable System Identification for COVID-19 Forecasting from Large-Scale GPS Data Journal Article
In: arXiv Preprint, pp. 6, 2021.
@article{barreras2021autoekf,
title = {{AutoEKF}: Scalable System Identification for {COVID-19} Forecasting from Large-Scale {GPS} Data},
author = {Francisco Barreras and Mikhail Hayhoe and Hamed Hassani and Victor M. Preciado},
url = {https://arxiv.org/abs/2106.14357},
year = {2021},
date = {2021-06-28},
journal = {arXiv Preprint},
pages = {6},
abstract = {We present an Extended Kalman Filter framework for system identification and control of a stochastic high-dimensional epidemic model. The scale and severity of the COVID-19 emergency have highlighted the need for accurate forecasts of the state of the pandemic at a high resolution. Mechanistic compartmental models are widely used to produce such forecasts and assist in the design of control and relief policies. Unfortunately, the scale and stochastic nature of many of these models often makes the estimation of their parameters difficult. With the goal of calibrating a high dimensional COVID-19 model using low-level mobility data, we introduce a method for tractable maximum likelihood estimation that combines tools from Bayesian inference with scalable optimization techniques from machine learning. The proposed approach uses automatic backward-differentiation to directly compute the gradient of the likelihood of COVID-19 incidence and death data. The likelihood of the observations is estimated recursively using an Extended Kalman Filter and can be easily optimized using gradient-based methods to compute maximum likelihood estimators. Our compartmental model is trained using GPS mobility data that measures the mobility patterns of millions of mobile phones across the United States. We show that, after calibrating against incidence and deaths data from the city of Philadelphia, our model is able to produce an accurate 30-day forecast of the evolution of the pandemic.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hu, Xinlan Emily; Whiting, Mark; Bernstein, Michael
Can Online Juries Make Consistent, Repeatable Decisions? Journal Article
In: Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, pp. 1-16, 2021.
@inproceedings{hu2021can,
title = {Can Online Juries Make Consistent, Repeatable Decisions?},
author = {Xinlan Emily Hu and Mark Whiting and Michael Bernstein},
url = {https://www.researchgate.net/profile/Mark-Whiting-2/publication/351417423_Can_Online_Juries_Make_Consistent_Repeatable_Decisions/links/60a63ff7a6fdcc731d3ecc47/Can-Online-Juries-Make-Consistent-Repeatable-Decisions.pdf},
doi = {10.1145/3411764.3445433},
year = {2021},
date = {2021-05-06},
booktitle = {Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems},
pages = {1--16},
abstract = {A jury of one’s peers is a prominent way to adjudicate disputes and is increasingly used in participatory governance online. The fairness of this approach rests on the assumption that juries are consistent: that the same jury would hand down similar judgments to similar cases. However, prior literature suggests that social influence would instead cause early interactions to cascade into different judgments for similar cases. In this paper, we report an online experiment that changes participants’ pseudonyms as they appear to collaborators, temporarily masking a jury’s awareness that they have deliberated together before. This technique allows us to measure consistency by reconvening the same jury on similar cases. Counter to expectation, juries are equally consistent as individuals, a result that is “good for democracy.” But this consistency arises in part due to group polarization, as consensus develops by hardening initial majority opinions. Furthermore, we find that aggregating groups’ perspectives without deliberation erodes consistency.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gamage, Dilrukshi; Staubitz, Thomas; Whiting, Mark
Peer assessment in MOOCs: Systematic literature review Journal Article
In: Distance Education, pp. 1-22, 2021.
@article{gamage2021peer,
title = {Peer assessment in {MOOCs}: Systematic literature review},
author = {Dilrukshi Gamage and Thomas Staubitz and Mark Whiting},
url = {https://www.researchgate.net/profile/Dilrukshi-Gamage-2/publication/351344733_Peer_assessment_in_MOOCs_Systematic_literature_review/links/60923fc3a6fdccaebd093ccc/Peer-assessment-in-MOOCs-Systematic-literature-review.pdf},
doi = {10.1080/01587919.2021.1911626},
year = {2021},
date = {2021-05-03},
journal = {Distance Education},
pages = {1--22},
abstract = {We report on a systematic review of the landscape of peer assessment in massive open online courses (MOOCs) with papers from 2014 to 2020 in 20 leading education technology publication venues across four databases containing education technology–related papers, addressing three research issues: the evolution of peer assessment in MOOCs during the period 2014 to 2020, the methods used in MOOCs to assess peers, and the challenges of and future directions in MOOC peer assessment. We provide summary statistics and a review of methods across the corpus and highlight three directions for improving the use of peer assessment in MOOCs: the need for focusing on scaling learning through peer evaluations, the need for scaling and optimizing team submissions in team peer assessments, and the need for embedding a social process for peer assessment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Jacobs, Abigail Z.; Watts, Duncan J.
A Large-Scale Comparative Study of Informal Social Networks in Firms Journal Article
In: Management Science, pp. 1-21, 2021.
@article{jacobs2021a,
title = {A Large-Scale Comparative Study of Informal Social Networks in Firms},
author = {Abigail Z. Jacobs and Duncan J. Watts},
url = {https://drive.google.com/file/d/18QfFiq5EYq5BY6FAfkoH8TYq98Tx6JjE/view?usp=sharing},
doi = {10.1287/mnsc.2021.3997},
year = {2021},
date = {2021-04-16},
urldate = {2021-04-16},
journal = {Management Science},
pages = {1--21},
abstract = {Theories of organizations are sympathetic to long-standing ideas from network science that organizational networks should be regarded as multiscale and capable of displaying emergent properties. However, the historical difficulty of collecting individual-level network data for many (N ≫ 1) organizations, each of which comprises many (n ≫ 1) individuals, has hobbled efforts to develop specific, theoretically motivated hypotheses connecting micro- (i.e., individual-level) network structure with macro-organizational properties. In this paper we seek to stimulate such efforts with an exploratory analysis of a unique data set of aggregated, anonymized email data from an enterprise email system that includes 1.8 billion messages sent by 1.4 million users from 65 publicly traded U.S. firms spanning a wide range of sizes and 7 industrial sectors. We uncover wide heterogeneity among firms with respect to all measured network characteristics, and we find robust network and organizational variation as a result of size. Interestingly, we find no clear associations between organizational network structure and firm age, industry, or performance; however, we do find that centralization increases with geographical dispersion—a result that is not explained by network size. Although preliminary, these results raise new questions for organizational theory as well as new issues for collecting, processing, and interpreting digital network data.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Cao, Hancheng; Yang, Vivian; Chen, Victor; Lee, Yu Jin; Stone, Lydia; Diarrassouba, N'godjigui Junior; Whiting, Mark; Bernstein, Michael
My Team Will Go On: Differentiating High and Low Viability Teams through Team Interaction Journal Article
In: Proceedings of the ACM Human-Computer Interaction, vol. 4, no. CSCW3, pp. 1-27, 2021.
@article{cao2021my,
title = {My Team Will Go On: Differentiating High and Low Viability Teams through Team Interaction},
author = {Hancheng Cao and Vivian Yang and Victor Chen and Yu Jin Lee and Lydia Stone and N'godjigui Junior Diarrassouba and Mark Whiting and Michael Bernstein},
url = {https://arxiv.org/pdf/2010.07292},
doi = {10.1145/3432929},
year = {2021},
date = {2021-01-05},
journal = {Proceedings of the ACM Human-Computer Interaction},
volume = {4},
number = {CSCW3},
pages = {1--27},
abstract = {Understanding team viability — a team’s capacity for sustained and future success — is essential for building effective teams. In this study, we aggregate features drawn from the organizational behavior literature to train a viability classification model over a dataset of 669 10-minute text conversations of online teams. We train classifiers to identify teams at the top decile (most viable teams), 50th percentile (above a median split), and bottom decile (least viable teams), then characterize the attributes of teams at each of these viability levels. We find that a lasso regression model achieves an accuracy of .74–.92 AUC ROC under different thresholds of classifying viability scores. From these models, we identify the use of exclusive language such as ‘but’ and ‘except’, and the use of second person pronouns, as the most predictive features for detecting the most viable teams, suggesting that active engagement with others’ ideas is a crucial signal of a viable team. Only a small fraction of the 10-minute discussion, as little as 70 seconds, is required for predicting the viability of team interaction. This work suggests opportunities for teams to assess, track, and visualize their own viability in real time as they collaborate.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Almaatouq, Abdullah; Becker, Joshua; Houghton, James P; Paton, Nicolas; Watts, Duncan J; Whiting, Mark E
Empirica: a virtual lab for high-throughput macro-level experiments Journal Article
In: Behavior Research Methods, pp. 1–14, 2021.
@article{almaatouq2021empirica,
  author    = {Abdullah Almaatouq and Joshua Becker and James P Houghton and Nicolas Paton and Duncan J Watts and Mark E Whiting},
  title     = {Empirica: a virtual lab for high-throughput macro-level experiments},
  journal   = {Behavior Research Methods},
  publisher = {Springer},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-01-01},
  pages     = {1--14},
  doi       = {10.3758/s13428-020-01535-9},
  url       = {https://drive.google.com/file/d/1asyd8onp_2df10W1AbzPOyar6WknEiCU/view},
  abstract  = {Virtual labs allow researchers to design high-throughput and macro-level experiments that are not feasible in traditional in-person physical lab settings. Despite the increasing popularity of online research, researchers still face many technical and logistical barriers when designing and deploying virtual lab experiments. While several platforms exist to facilitate the development of virtual lab experiments, they typically present researchers with a stark trade-off between usability and functionality. We introduce Empirica: a modular virtual lab that offers a solution to the usability--functionality trade-off by employing a ``flexible defaults'' design strategy. This strategy enables us to maintain complete ``build anything'' flexibility while offering a development platform that is accessible to novice programmers. Empirica's architecture is designed to allow for parameterizable experimental designs, reusable protocols, and rapid development. These features will increase the accessibility of virtual lab experiments, remove barriers to innovation in experiment design, and enable rapid progress in the understanding of human behavior.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Watts, Duncan J; Rothschild, David M; Mobius, Markus
Measuring the news and its impact on democracy Journal Article
In: Proceedings of the National Academy of Sciences, vol. 118, no. 15, 2021.
@article{watts2021measuring,
title = {Measuring the news and its impact on democracy},
author = {Duncan J Watts and David M Rothschild and Markus Mobius},
url = {https://drive.google.com/file/d/1FiI7IKkNzdMB8DfvlQl9k5U5w4RyyKT9/view},
doi = {10.1073/pnas.1912443118},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {Proceedings of the National Academy of Sciences},
volume = {118},
number = {15},
publisher = {National Academy of Sciences},
abstract = {Since the 2016 US presidential election, the deliberate spread of misinformation online, and on social media in particular, has generated extraordinary concern, in large part because of its potential effects on public opinion, political polarization, and ultimately democratic decision making. Recently, however, a handful of papers have argued that both the prevalence and consumption of ``fake news'' per se is extremely low compared with other types of news and news-relevant content. Although neither prevalence nor consumption is a direct measure of influence, this work suggests that proper understanding of misinformation and its effects requires a much broader view of the problem, encompassing biased and misleading---but not necessarily factually incorrect---information that is routinely produced or amplified by mainstream news organizations. In this paper, we propose an ambitious collective research agenda to measure the origins, nature, and prevalence of misinformation, broadly construed, as well as its impact on democracy. We also sketch out some illustrative examples of completed, ongoing, or planned research projects that contribute to this agenda.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020
Ghasemian, Amir; Hosseinmardi, Homa; Galstyan, Aram; Airoldi, Edoardo; Clauset, Aaron
Stacking models for nearly optimal link prediction in complex networks Journal Article
In: Proceedings of the National Academy of Sciences, vol. 117, no. 38, pp. 23393-23400, 2020.
@article{ghasemian2020stacking,
title = {Stacking models for nearly optimal link prediction in complex networks},
author = {Amir Ghasemian and Homa Hosseinmardi and Aram Galstyan and Edoardo Airoldi and Aaron Clauset},
url = {https://www.pnas.org/content/117/38/23393},
doi = {10.1073/pnas.1914950117},
year = {2020},
date = {2020-09-22},
journal = {Proceedings of the National Academy of Sciences},
volume = {117},
number = {38},
pages = {23393--23400},
abstract = {Most real-world networks are incompletely observed. Algorithms that can accurately predict which links are missing can dramatically speed up network data collection and improve network model validation. Many algorithms now exist for predicting missing links, given a partially observed network, but it has remained unknown whether a single best predictor exists, how link predictability varies across methods and networks from different domains, and how close to optimality current methods are. We answer these questions by systematically evaluating 203 individual link predictor algorithms, representing three popular families of methods, applied to a large corpus of 550 structurally diverse networks from six scientific domains. We first show that individual algorithms exhibit a broad diversity of prediction errors, such that no one predictor or family is best, or worst, across all realistic inputs. We then exploit this diversity using network-based metalearning to construct a series of “stacked” models that combine predictors into a single algorithm. Applied to a broad range of synthetic networks, for which we may analytically calculate optimal performance, these stacked models achieve optimal or nearly optimal levels of accuracy. Applied to real-world networks, stacked models are superior, but their accuracy varies strongly by domain, suggesting that link prediction may be fundamentally easier in social networks than in biological or technological networks. These results indicate that the state of the art for link prediction comes from combining individual algorithms, which can achieve nearly optimal predictions. We close with a brief discussion of limitations and opportunities for further improvements.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lazer, David M. J.; Pentland, Alex; Watts, Duncan J.; Aral, Sinan; Athey, Susan; Contractor, Noshir; Freelon, Deen; Gonzalez-Bailon, Sandra; King, Gary; Margetts, Helen; Nelson, Alondra; Salganik, Matthew J.; Strohmaier, Markus; Vespignani, Alessandro; Wagner, Claudia
Computational social science: Obstacles and opportunities Journal Article
In: Science, vol. 369, no. 6507, pp. 1060-1062, 2020.
@article{lazer2020computational,
title = {Computational social science: Obstacles and opportunities},
author = {David M. J. Lazer and Alex Pentland and Duncan J. Watts and Sinan Aral and Susan Athey and Noshir Contractor and Deen Freelon and Sandra Gonzalez-Bailon and Gary King and Helen Margetts and Alondra Nelson and Matthew J. Salganik and Markus Strohmaier and Alessandro Vespignani and Claudia Wagner},
url = {https://drive.google.com/file/d/1otXwE9goSI6AbL7P_QqihTCJsu-ReAiI/view?usp=sharing},
doi = {10.1126/science.aaz8170},
year = {2020},
date = {2020-08-28},
urldate = {2020-08-28},
journal = {Science},
volume = {369},
number = {6507},
pages = {1060--1062},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Whiting, Mark; Bernstein, Michael
Can We Just Start Over Again? Resetting Remote Team Dynamics Journal Article
In: 2020.
@article{whiting2020can,
  author    = {Mark Whiting and Michael Bernstein},
  title     = {Can We Just Start Over Again? Resetting Remote Team Dynamics},
  year      = {2020},
  date      = {2020-07-07},
  url       = {https://www.microsoft.com/en-us/research/uploads/prod/2020/07/NFW-Whiting-et-al.pdf},
  abstract  = {Interactions defining teamwork today are heavily influenced by constraints and expectations found in in-person teams, however, remote collaboration provides the opportunity to try new ways to make teams work. One foundation of teamwork is persistent identity—we are who we were last time we worked together. Breaking with the expectation of in-person teams, we present a system that affords discontinuous identity using two-way pseudonym masking—enabling teams with new behaviors to arise from the same group of individuals. With this scaffold, a novel family of experiments, comparing the same group across multiple fresh starts, are possible. Further, interventions that involve choosing between versions of the same team are unlocked. We present an overview of experiments and interventions leveraging this system, and propose methods for its broader use in organizations enacting the future of work.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Whiting, Mark; Gao, Irena; Xing, Michelle; Diarrassouba, N'godjigui Junior; Nguyen, Tonya; Bernstein, Michael
Parallel Worlds: Repeated Initializations of the Same Team To Improve Team Viability Journal Article
In: Proceedings of the ACM Human-Computer Interaction, vol. 4, no. CSCW1, pp. 22, 2020.
@article{whiting2020parallel,
title = {Parallel Worlds: Repeated Initializations of the Same Team To Improve Team Viability},
author = {Mark Whiting and Irena Gao and Michelle Xing and N'godjigui Junior Diarrassouba and Tonya Nguyen and Michael Bernstein},
url = {https://www.researchgate.net/profile/Mark_Whiting3/publication/341743251_Parallel_Worlds_Repeated_Initializations_of_the_Same_Team_to_Improve_Team_Viability/links/5ed6931045851529452a2738/Parallel-Worlds-Repeated-Initializations-of-the-Same-Team-to-Improve-Team-Viability.pdf},
doi = {10.1145/3392877},
year = {2020},
date = {2020-05-01},
journal = {Proceedings of the ACM Human-Computer Interaction},
volume = {4},
number = {CSCW1},
pages = {22},
abstract = {A team’s early interactions are influential: small behaviors cascade, driving the team either toward successful collaboration or toward fracture. Would a team be more viable if it could undo initial interactional missteps and try again? We introduce a technique that supports online and remote teams in creating multiple parallel worlds: the same team meets many times, led to believe that each convening is with a new team due to pseudonym masking while actual membership remains static. Afterward, the team moves forward with the parallel world with the highest viability by using the same pseudonyms and conversation history from that instance. In two experiments, we find that this technique improves team viability: teams that are reconvened from the highest-viability parallel world are significantly more viable than the same group meeting in a new parallel world. Our work suggests parallel worlds can help teams start off on the right foot — and stay there.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Allen, Jennifer; Howland, Baird; Mobius, Markus; Rothschild, David; Watts, Duncan J
Evaluating the fake news problem at the scale of the information ecosystem Journal Article
In: Science Advances, vol. 6, no. 14, pp. eaay3539, 2020.
@article{allen2020evaluating,
  author    = {Jennifer Allen and Baird Howland and Markus Mobius and David Rothschild and Duncan J Watts},
  title     = {Evaluating the fake news problem at the scale of the information ecosystem},
  journal   = {Science Advances},
  publisher = {American Association for the Advancement of Science},
  year      = {2020},
  date      = {2020-01-01},
  urldate   = {2020-01-01},
  volume    = {6},
  number    = {14},
  pages     = {eaay3539},
  doi       = {10.1126/sciadv.aay3539},
  url       = {https://drive.google.com/file/d/15fa72A6LcHAY8tjlQa96kSDeA0xIIdFY/view},
  abstract  = {``Fake news,'' broadly defined as false or misleading information masquerading as legitimate news, is frequently asserted to be pervasive online with serious consequences for democracy. Using a unique multimode dataset that comprises a nationally representative sample of mobile, desktop, and television consumption, we refute this conventional wisdom on three levels. First, news consumption of any sort is heavily outweighed by other forms of media consumption, comprising at most 14.2% of Americans' daily media diets. Second, to the extent that Americans do consume news, it is overwhelmingly from television, which accounts for roughly five times as much as news consumption as online. Third, fake news comprises only 0.15% of Americans' daily media diet. Our results suggest that the origins of public misinformedness and polarization are more likely to lie in the content of ordinary news or the avoidance of news altogether as they are in overt fakery.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2019
Whiting, Mark; Blaising, Allie; Barreau, Chloe; Fiuza, Laura; Marda, Nik; Valentine, Melissa; Bernstein, Michael
Did It Have To End This Way? Understanding the Consistency of Team Fracture Journal Article
In: Proceedings of the ACM Human-Computer Interaction, vol. 3, no. 209, pp. 23, 2019.
@article{whiting2019did,
  title     = {Did It Have To End This Way? Understanding the Consistency of Team Fracture},
  author    = {Mark Whiting and Allie Blaising and Chloe Barreau and Laura Fiuza and Nik Marda and Melissa Valentine and Michael Bernstein},
  url       = {https://dl.acm.org/doi/pdf/10.1145/3359311},
  doi       = {10.1145/3359311},
  year      = {2019},
  date      = {2019-11-01},
  journal   = {Proceedings of the ACM on Human-Computer Interaction},
  volume    = {3},
  number    = {209},
  pages     = {23},
  abstract  = {Was a problematic team always doomed to frustration, or could it have ended another way? In this paper, we study the consistency of team fracture: a loss of team viability so severe that the team no longer wants to work together. Understanding whether team fracture is driven by the membership of the team, or by how their collaboration unfolded, motivates the design of interventions that either identify compatible teammates or ensure effective early interactions. We introduce an online experiment that reconvenes the same team without members realizing that they have worked together before, enabling us to temporarily erase previous team dynamics. Participants in our study completed a series of tasks across multiple teams, including one reconvened team, and privately blacklisted any teams that they would not want to work with again. We identify fractured teams as those blacklisted by half the members. We find that reconvened teams are strikingly polarized by task in the consistency of their fracture outcomes. On a creative task, teams might as well have been a completely different set of people: the same teams changed their fracture outcomes at a random chance rate. On a cognitive conflict and on an intellective task, the team instead replayed the same dynamics without realizing it, rarely changing their fracture outcomes. These results indicate that, for some tasks, team fracture can be strongly influenced by interactions in the first moments of a team’s collaboration, and that interventions targeting these initial moments may be critical to scaffolding long-lasting teams.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Whiting, Mark; Hugh, Grant; Bernstein, Michael
Fair work: Crowd work minimum wage with one line of code Journal Article
In: Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, vol. 7, no. 1, pp. 197-206, 2019.
@article{whiting2019fair,
  title     = {Fair work: Crowd work minimum wage with one line of code},
  author    = {Mark Whiting and Grant Hugh and Michael Bernstein},
  url       = {https://ojs.aaai.org/index.php/HCOMP/article/download/5283/5135/},
  year      = {2019},
  date      = {2019-10-28},
  journal   = {Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
  volume    = {7},
  number    = {1},
  pages     = {197--206},
  abstract  = {Accurate task pricing in microtask marketplaces requires substantial effort via trial and error, contributing to a pattern of worker underpayment. In response, we introduce Fair Work, enabling requesters to automatically pay their workers minimum wage by adding a one-line script tag to their task HTML on Amazon Mechanical Turk. Fair Work automatically surveys workers to find out how long the task takes, then aggregates those self-reports and auto-bonuses workers up to a minimum wage if needed. Evaluations demonstrate that the system estimates payments more accurately than requesters and that worker time surveys are close to behaviorally observed time measurements. With this work, we aim to lower the threshold for pro-social work practices in microtask marketplaces.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Salganik, Matthew J.
Measuring the predictability of life outcomes with a scientific mass collaboration Journal Article
In: Proceedings of the National Academy of Sciences, pp. 1-6, 2019.
@article{salganik2019measuring,
  title     = {Measuring the predictability of life outcomes with a scientific mass collaboration},
  author    = {Salganik, Matthew J. and others},
  url       = {https://drive.google.com/file/d/1YPCSUCB5Mc3ssdaPStvoS_I4i4CkadNV/view?usp=sharing},
  doi       = {10.1073/pnas.1915006117},
  year      = {2019},
  date      = {2019-10-01},
  urldate   = {2019-10-01},
  journal   = {Proceedings of the National Academy of Sciences},
  pages     = {1--6},
  abstract  = {How predictable are life trajectories? We investigated this question with a scientific mass collaboration using the common task method; 160 teams built predictive models for six life outcomes using data from the Fragile Families and Child Wellbeing Study, a high-quality birth cohort study. Despite using a rich dataset and applying machine-learning methods optimized for prediction, the best predictions were not very accurate and were only slightly better than those from a simple benchmark model. Within each outcome, prediction error was strongly associated with the family being predicted and weakly associated with the technique used to generate the prediction. Overall, these results suggest practical limits to the predictability of life outcomes in some settings and illustrate the value of mass collaborations in the social sciences.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Risi, Joseph; Sharma, Amit; Shah, Rohan; Connelly, Matthew; Watts, Duncan J.
Predicting history Journal Article
In: Nature Human Behaviour, vol. 3, pp. 906–912, 2019.
@article{risi2019predicting,
  title     = {Predicting history},
  author    = {Joseph Risi and Amit Sharma and Rohan Shah and Matthew Connelly and Duncan J. Watts},
  url       = {https://drive.google.com/file/d/1zZrY1CmoOVxmHBfe_MRm7LsNpKrxE-CP/view?usp=sharing},
  doi       = {10.1038/s41562-019-0620-8},
  year      = {2019},
  date      = {2019-09-01},
  urldate   = {2019-09-01},
  journal   = {Nature Human Behaviour},
  volume    = {3},
  pages     = {906--912},
  abstract  = {Can events be accurately described as historic at the time they are happening? Claims of this sort are in effect predictions about the evaluations of future historians; that is, that they will regard the events in question as significant. Here we provide empirical evidence in support of earlier philosophical arguments that such claims are likely to be spurious and that, conversely, many events that will one day be viewed as historic attract little attention at the time. We introduce a conceptual and methodological framework for applying machine learning prediction models to large corpora of digitized historical archives. We find that although such models can correctly identify some historically important documents, they tend to overpredict historical significance while also failing to identify many documents that will later be deemed important, where both types of error increase monotonically with the number of documents under consideration. On balance, we conclude that historical significance is extremely difficult to predict, consistent with other recent work on intrinsic limits to predictability in complex social systems. However, the results also indicate the feasibility of developing ‘artificial archivists’ to identify potentially historic documents in very large digital corpora.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hosseinmardi, Homa; Kao, Hsien-Te; Lerman, Kristina; Ferrara, Emilio
Discovering hidden structure in high dimensional human behavioral data via tensor factorization Journal Article
In: pp. 5, 2019.
@article{hosseinmardi2019discovering,
  title      = {Discovering hidden structure in high dimensional human behavioral data via tensor factorization},
  author     = {Homa Hosseinmardi and Hsien-Te Kao and Kristina Lerman and Emilio Ferrara},
  url        = {https://arxiv.org/pdf/1905.08846.pdf},
  eprint     = {1905.08846},
  eprinttype = {arXiv},
  year       = {2019},
  date       = {2019-05-21},
  pages      = {5},
  abstract   = {In recent years, the rapid growth in technology has increased the opportunity for longitudinal human behavioral studies. Rich multimodal data, from wearables like Fitbit, online social networks, mobile phones etc. can be collected in natural environments. Uncovering the underlying low-dimensional structure of noisy multi-way data in an unsupervised setting is a challenging problem. Tensor factorization has been successful in extracting the interconnected low-dimensional descriptions of multi-way data. In this paper, we apply non-negative tensor factorization on a real-word wearable sensor data, StudentLife, to find latent temporal factors and group of similar individuals. Meta data is available for the semester schedule, as well as the individuals’ performance and personality. We demonstrate that non-negative tensor factorization can successfully discover clusters of individuals who exhibit higher academic performance, as well as those who frequently engage in leisure activities. The recovered latent temporal patterns associated with these groups are validated against ground truth data to demonstrate the accuracy of our framework.},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {article}
}
Kao, Hsien-Te; Yan, Shen; Huang, Di; Bartley, Nathan; Hosseinmardi, Homa; Ferrara, Emilio
Understanding Cyberbullying on Instagram and Ask.fm via Social Role Detection Journal Article
In: Companion Proceedings of The 2019 World Wide Web Conference, pp. 183-188, 2019.
@article{kao2019understanding,
  title     = {Understanding Cyberbullying on {Instagram} and {Ask.fm} via Social Role Detection},
  author    = {Hsien-Te Kao and Shen Yan and Di Huang and Nathan Bartley and Homa Hosseinmardi and Emilio Ferrara},
  url       = {https://dl.acm.org/doi/abs/10.1145/3308560.3316505},
  doi       = {10.1145/3308560.3316505},
  year      = {2019},
  date      = {2019-05-13},
  journal   = {Companion Proceedings of The 2019 World Wide Web Conference},
  pages     = {183--188},
  abstract  = {Cyberbullying is a major issue on online social platforms, and can have prolonged negative psychological impact on both the bullies and their targets. Users can be characterized by their involvement in cyberbullying according to different social roles including victim, bully, and victim supporter. In this work, we propose a social role detection framework to understand cyberbullying on online social platforms, and select a dataset that contains users’ records on both Instagram and Ask.fm as a case study. We refine the traditional victim-bully framework by constructing a victim-bully-supporter network on Instagram. These social roles are automatically identified via ego comment networks and linguistic cues of comments. Additionally, we analyze the consistency of users’ social role within Instagram and compare users’ behaviors on Ask.fm. Our analysis reveals the inconsistency of social roles both within and across platforms, which suggests social roles in cyberbullying are not invariant by conversation, person, or social platform.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ghasemian, Amir; Hosseinmardi, Homa; Clauset, Aaron
Evaluating overfit and underfit in models of network community structure Journal Article
In: IEEE Transactions on Knowledge and Data Engineering, vol. 32, no. 9, pp. 1722-1735, 2019.
@article{ghasemian2019evaluating,
  title     = {Evaluating overfit and underfit in models of network community structure},
  author    = {Amir Ghasemian and Homa Hosseinmardi and Aaron Clauset},
  url       = {https://arxiv.org/pdf/1802.10582},
  year      = {2019},
  date      = {2019-04-16},
  journal   = {IEEE Transactions on Knowledge and Data Engineering},
  volume    = {32},
  number    = {9},
  pages     = {1722--1735},
  abstract  = {A common graph mining task is community detection, which seeks an unsupervised decomposition of a network into groups based on statistical regularities in network connectivity. Although many such algorithms exist, community detection’s No Free Lunch theorem implies that no algorithm can be optimal across all inputs. However, little is known in practice about how different algorithms over or underfit to real networks, or how to reliably assess such behavior across algorithms. Here, we present a broad investigation of over and underfitting across 16 state-of-the-art community detection algorithms applied to a novel benchmark corpus of 572 structurally diverse real-world networks. We find that (i) algorithms vary widely in the number and composition of communities they find, given the same input; (ii) algorithms can be clustered into distinct high-level groups based on similarities of their outputs on real-world networks; (iii) algorithmic differences induce wide variation in accuracy on link-based learning tasks; and, (iv) no algorithm is always the best at such tasks across all inputs. Finally, we quantify each algorithm’s overall tendency to over or underfit to network data using a theoretically principled diagnostic, and discuss the implications for future advances in community detection.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Meyer, Michelle N; Heck, Patrick R; Holtzman, Geoffrey S; Anderson, Stephen M; Cai, William; Watts, Duncan J; Chabris, Christopher F
Objecting to experiments that compare two unobjectionable policies or treatments Journal Article
In: Proceedings of the National Academy of Sciences, vol. 116, no. 22, pp. 10723–10728, 2019.
@article{meyer2019objecting,
  title     = {Objecting to experiments that compare two unobjectionable policies or treatments},
  author    = {Michelle N Meyer and Patrick R Heck and Geoffrey S Holtzman and Stephen M Anderson and William Cai and Duncan J Watts and Christopher F Chabris},
  url       = {https://drive.google.com/file/d/1ChIR1vukcTAd3YCWqUZAynbrayE3YoWD/view},
  doi       = {10.1073/pnas.1820701116},
  year      = {2019},
  date      = {2019-01-01},
  urldate   = {2019-01-01},
  journal   = {Proceedings of the National Academy of Sciences},
  volume    = {116},
  number    = {22},
  pages     = {10723--10728},
  publisher = {National Academy of Sciences},
  abstract  = {Randomized experiments---long the gold standard in medicine---are increasingly used throughout the social sciences and professions to evaluate business products and services, government programs, education and health policies, and global aid. We find robust evidence---across 16 studies of 5,873 participants from three populations spanning nine domains---that people often approve of untested policies or treatments (A or B) being universally implemented but disapprove of randomized experiments (A/B tests) to determine which of those policies or treatments is superior. This effect persists even when there is no reason to prefer A to B and even when recipients are treated unequally and randomly in all conditions (A, B, and A/B). This experimentation aversion may be an important barrier to evidence-based practice.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2018
Sharma, Amit; Hofman, Jake M.; Watts, Duncan J.
Split-Door Criterion: Identification of Causal Effects Through Auxiliary Outcomes Journal Article
In: The Annals of Applied Statistics, vol. 12, no. 4, pp. 2699-2733, 2018.
@article{sharma2018split,
  title     = {Split-Door Criterion: Identification of Causal Effects Through Auxiliary Outcomes},
  author    = {Amit Sharma and Jake M. Hofman and Duncan J. Watts},
  url       = {https://drive.google.com/file/d/1SlOC1sdKPM4FZAceDjHTaOj5GVf3f5F5/view?usp=sharing},
  doi       = {10.1214/18-AOAS1179},
  year      = {2018},
  date      = {2018-04-01},
  urldate   = {2018-04-01},
  journal   = {The Annals of Applied Statistics},
  volume    = {12},
  number    = {4},
  pages     = {2699--2733},
  abstract  = {We present a method for estimating causal effects in time series data when fine-grained information about the outcome of interest is available. Specifically, we examine what we call the split-door setting, where the outcome variable can be split into two parts: one that is potentially affected by the cause being studied and another that is independent of it, with both parts sharing the same (unobserved) confounders. We show that under these conditions, the problem of identification reduces to that of testing for independence among observed variables, and propose a method that uses this approach to automatically find subsets of the data that are causally identified. We demonstrate the method by estimating the causal impact of Amazon’s recommender system on traffic to product pages, finding thousands of examples within the dataset that satisfy the split-door criterion. Unlike past studies based on natural experiments that were limited to a single product category, our method applies to a large and representative sample of products viewed on the site. In line with previous work, we find that the widely-used click-through rate (CTR) metric overestimates the causal impact of recommender systems; depending on the product category, we estimate that 50–80% of the traffic attributed to recommender systems would have happened even without any recommendations. We conclude with guidelines for using the split-door criterion as well as a discussion of other contexts where the method can be applied.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lazer, David M. J.; Baum, Matthew A.; Benkler, Yochai; Berinsky, Adam J.; Greenhill, Kelly M.; Menczer, Filippo; Metzger, Miriam J.; Nyhan, Brendan; Pennycook, Gordon; Rothschild, David; Schudson, Michael; Sloman, Steven A.; Sunstein, Cass R.; Thorson, Emily A.; Watts, Duncan J.; Zittrain, Jonathan L.
The science of fake news Journal Article
In: Science, vol. 359, no. 6380, pp. 1094-1096, 2018.
@article{lazer2018the,
  title     = {The science of fake news},
  author    = {David M. J. Lazer and Matthew A. Baum and Yochai Benkler and Adam J. Berinsky and Kelly M. Greenhill and Filippo Menczer and Miriam J. Metzger and Brendan Nyhan and Gordon Pennycook and David Rothschild and Michael Schudson and Steven A. Sloman and Cass R. Sunstein and Emily A. Thorson and Duncan J. Watts and Jonathan L. Zittrain},
  url       = {https://science.sciencemag.org/content/359/6380/1094/tab-pdf},
  doi       = {10.1126/science.aao2998},
  year      = {2018},
  date      = {2018-03-09},
  journal   = {Science},
  volume    = {359},
  number    = {6380},
  pages     = {1094--1096},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Benjamin, Daniel J.
Redefine statistical significance Journal Article
In: Nature Human Behaviour, vol. 2, pp. 6-10, 2018.
@article{benjamin2018redefine,
  title     = {Redefine statistical significance},
  author    = {Benjamin, Daniel J. and others},
  url       = {https://drive.google.com/file/d/1JaXDX3A47agzQd1fMyFlZ5nO_4TJLxdQ/view?usp=sharing},
  doi       = {10.1038/s41562-017-0189-z},
  year      = {2018},
  date      = {2018-01-01},
  urldate   = {2018-01-01},
  journal   = {Nature Human Behaviour},
  volume    = {2},
  pages     = {6--10},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2017
Houghton, James P.; Siegel, Michael; Madnick, Stuart; Tounaka, Nobuaki; Nakamura, Kazutaka; Sugiyama, Takaaki; Nakagawa, Daisuke; Shirnen, Buyanjargal
Beyond Keywords: Tracking the evolution of conversational clusters in social media Journal Article
In: Sociological Methods & Research, vol. 48, no. 3, pp. 588-607, 2017.
@article{houghton2017beyond,
  title     = {Beyond Keywords: Tracking the evolution of conversational clusters in social media},
  author    = {James P. Houghton and Michael Siegel and Stuart Madnick and Nobuaki Tounaka and Kazutaka Nakamura and Takaaki Sugiyama and Daisuke Nakagawa and Buyanjargal Shirnen},
  url       = {https://core.ac.uk/download/pdf/188187247.pdf},
  doi       = {10.1177/0049124117729705},
  year      = {2017},
  date      = {2017-10-09},
  journal   = {Sociological Methods \& Research},
  volume    = {48},
  number    = {3},
  pages     = {588--607},
  abstract  = {The potential of social media to give insight into the dynamic evolution of public conversations, and into their reactive and constitutive role in political activities, has to date been underdeveloped. While topic modeling can give static insight into the structure of a conversation, and keyword volume tracking can show how engagement with a specific idea varies over time, there is need for a method of analysis able to understand how conversations about societal values evolve and react to events in the world, incorporating new ideas and relating them to existing themes. In this paper, we propose a method for analyzing social media messages that formalizes the structure of public conversations, and allows the sociologist to study the evolution of public discourse in a rigorous, replicable, and data-driven fashion. This approach may be useful to those studying the social construction of meaning, the origins of factionalism and internecine conflict, or boundary-setting and group-identification exercises; and has potential implications for those working to promote understanding and intergroup reconciliation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Jasny, B. R.; Wigginton, N.; McNutt, M.; Bubela, T.; Buck, S.; Cook-Deegan, R.; Gardner, T.; Hanson, B.; Hustad, C.; Kiermer, V.; Lazer, D.; Lupia, A.; Manrai, A.; McConnell, L.; Noonan, K.; Phimister, E.; Simon, B.; Strandburg, K.; Summers, Z.; Watts, D.
Fostering reproducibility in industry-academia research Journal Article
In: Science, vol. 357, no. 6353, pp. 759-761, 2017.
@article{jasny2017fostering,
  title     = {Fostering reproducibility in industry-academia research},
  author    = {B. R. Jasny and N. Wigginton and M. McNutt and T. Bubela and S. Buck and R. Cook-Deegan and T. Gardner and B. Hanson and C. Hustad and V. Kiermer and D. Lazer and A. Lupia and A. Manrai and L. McConnell and K. Noonan and E. Phimister and B. Simon and K. Strandburg and Z. Summers and D. Watts},
  url       = {https://drive.google.com/file/d/1HtrSro1FT6cFmlImWYTs_GonbiKbi9Og/view?usp=sharing},
  year      = {2017},
  date      = {2017-08-25},
  urldate   = {2017-08-25},
  journal   = {Science},
  volume    = {357},
  number    = {6353},
  pages     = {759--761},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hofman, Jake M; Sharma, Amit; Watts, Duncan J
Prediction and explanation in social systems Journal Article
In: Science, vol. 355, no. 6324, pp. 486–488, 2017.
@article{hofman2017prediction,
  title     = {Prediction and explanation in social systems},
  author    = {Jake M Hofman and Amit Sharma and Duncan J Watts},
  url       = {https://drive.google.com/file/d/1lkTvMFVOBt7GZ6bu4M2nYPWb017y68XC/view},
  doi       = {10.1126/science.aal3856},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  journal   = {Science},
  volume    = {355},
  number    = {6324},
  pages     = {486--488},
  publisher = {American Association for the Advancement of Science},
  abstract  = {Historically, social scientists have sought out explanations of human and social phenomena that provide interpretable causal mechanisms, while often ignoring their predictive accuracy. We argue that the increasingly computational nature of social science is beginning to reverse this traditional bias against prediction; however, it has also highlighted three important issues that require resolution. First, current practices for evaluating predictions must be better standardized. Second, theoretical limits to predictive accuracy in complex social systems must be better characterized, thereby setting expectations for what can be predicted or explained. Third, predictive accuracy and interpretability must be recognized as complements, not substitutes, when evaluating explanations. Resolving these three issues will lead to better, more replicable, and more useful social science.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Watts, Duncan J
Should social science be more solution-oriented? Journal Article
In: Nature Human Behaviour, vol. 1, no. 1, pp. 1–5, 2017.
@article{watts2017should,
  title     = {Should social science be more solution-oriented?},
  author    = {Duncan J Watts},
  url       = {https://drive.google.com/file/d/1gsJRHhwcwW1C-zf3PStz1CqlVILUIitB/view},
  doi       = {10.1038/s41562-016-0015},
  year      = {2017},
  date      = {2017-01-01},
  urldate   = {2017-01-01},
  journal   = {Nature Human Behaviour},
  volume    = {1},
  number    = {1},
  pages     = {1--5},
  publisher = {Nature Publishing Group},
  abstract  = {Over the past 100 years, social science has generated a tremendous number of theories on the topics of individual and collective human behaviour. However, it has been much less successful at reconciling the innumerable inconsistencies and contradictions among these competing explanations, a situation that has not been resolved by recent advances in `computational social science'. In this Perspective, I argue that this `incoherency problem' has been perpetuated by an historical emphasis in social science on the advancement of theories over the solution of practical problems. I argue that one way for social science to make progress is to adopt a more solution-oriented approach, starting first with a practical problem and then asking what theories (and methods) must be brought to bear to solve it. Finally, I conclude with a few suggestions regarding the sort of problems on which progress might be made and how we might organize ourselves to solve them.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2014
Watts, Duncan
Common Sense and Sociological Explanations Journal Article
In: American Journal of Sociology, vol. 120, no. 2, pp. 313-351, 2014.
@article{watts2014common,
  title     = {Common Sense and Sociological Explanations},
  author    = {Duncan Watts},
  url       = {http://www.jstor.org/stable/10.1086/678271},
  doi       = {10.1086/678271},
  year      = {2014},
  date      = {2014-09-01},
  journal   = {American Journal of Sociology},
  volume    = {120},
  number    = {2},
  pages     = {313--351},
  abstract  = {Sociologists have long advocated a sociological approach to explanation by contrasting it with common sense. The argument of this article, however, is that sociologists rely on common sense more than they realize. Moreover, this unacknowledged reliance causes serious problems for their explanations of social action, that is, for why people do what they do. Many such explanations, it is argued, conflate understandability with causality in ways that are not valid by the standards of scientific explanation. It follows that if sociologists want their explanations to be scientifically valid, they must evaluate them specifically on those grounds—in particular, by forcing them to make predictions. In becoming more scientific, however, it is predicted that sociologists’ explanations will also become less satisfying from an intuitive, sense-making perspective. Even as novel sources of data and improved methods open exciting new directions for sociological research, therefore, sociologists will increasingly have to choose between unsatisfying scientific explanations and satisfying but unscientific stories.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
2011
Watts, Duncan J.
Everything Is Obvious Book
Crown, 2011, ISBN: 9780385531696, 0385531699.
@book{watts2011everything,
  title     = {Everything Is Obvious},
  author    = {Duncan J. Watts},
  url       = {https://www.google.com/books/edition/Everything_Is_Obvious/kT_4AAAAQBAJ?hl=en&gbpv=0},
  isbn      = {9780385531696, 0385531699},
  year      = {2011},
  date      = {2011-03-29},
  publisher = {Crown},
  abstract  = {Why is the Mona Lisa the most famous painting in the world? Why did Facebook succeed when other social networking sites failed? Did the surge in Iraq really lead to less violence? How much can CEO's impact the performance of their companies? And does higher pay incentivize people to work hard?
If you think the answers to these questions are a matter of common sense, think again. As sociologist and network science pioneer Duncan Watts explains in this provocative book, the explanations that we give for the outcomes that we observe in life--explanation that seem obvious once we know the answer--are less useful than they seem.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
If you think the answers to these questions are a matter of common sense, think again. As sociologist and network science pioneer Duncan Watts explains in this provocative book, the explanations that we give for the outcomes that we observe in life--explanation that seem obvious once we know the answer--are less useful than they seem.