@article{haroon2022,
title = {YouTube, The Great Radicalizer? Auditing and Mitigating Ideological Biases in YouTube Recommendations},
author = {Haroon, Muhammad and Chhabra, Anshuman and Liu, Xin and Mohapatra, Prasant and Shafiq, Zubair and Wojcieszak, Magdalena},
journal = {arXiv:2203.10666 [cs]},
year = {2022},
doi = {10.48550/ARXIV.2203.10666},
url = {https://arxiv.org/abs/2203.10666}
}
@article{brooke2021trouble,
title = {Trouble in programmer’s paradise: gender-biases in sharing and recognising technical knowledge on Stack Overflow},
author = {Brooke, SJ},
journal = {Information, Communication \& Society},
volume = {24},
number = {14},
pages = {2091--2112},
year = {2021},
publisher = {Taylor \& Francis}
}
@book{silge_text_2017,
address = {Sebastopol, CA},
title = {Text {Mining} with {R}: {A} {Tidy} {Approach}},
publisher = {O'Reilly Media},
author = {Silge, Julia and Robinson, David},
year = {2017},
}
@book{wickham_r_2017,
address = {Sebastopol, CA},
title = {R for {Data} {Science}},
publisher = {O'Reilly Media},
author = {Wickham, Hadley and Grolemund, Garrett},
year = {2017},
}
@article{zhang_casm_2019,
title = {{CASM}: {A} {Deep}-{Learning} {Approach} for {Identifying} {Collective} {Action} {Events} with {Text} and {Image} {Data} from {Social} {Media}},
volume = {49},
issn = {0081-1750},
shorttitle = {{CASM}},
url = {https://doi.org/10.1177/0081175019860244},
doi = {10.1177/0081175019860244},
abstract = {Protest event analysis is an important method for the study of collective action and social movements and typically draws on traditional media reports as the data source. We introduce collective action from social media (CASM)—a system that uses convolutional neural networks on image data and recurrent neural networks with long short-term memory on text data in a two-stage classifier to identify social media posts about offline collective action. We implement CASM on Chinese social media data and identify more than 100,000 collective action events from 2010 to 2017 (CASM-China). We evaluate the performance of CASM through cross-validation, out-of-sample validation, and comparisons with other protest data sets. We assess the effect of online censorship and find it does not substantially limit our identification of events. Compared to other protest data sets, CASM-China identifies relatively more rural, land-related protests and relatively few collective action events related to ethnic and religious conflict.},
language = {en},
number = {1},
urldate = {2021-04-01},
journal = {Sociological Methodology},
author = {Zhang, Han and Pan, Jennifer},
month = aug,
year = {2019},
note = {Publisher: SAGE Publications Inc},
keywords = {China, collective action, deep learning, event data, social media},
pages = {1--57},
}
@article{garg_word_2018,
title = {Word embeddings quantify 100 years of gender and ethnic stereotypes},
volume = {115},
issn = {0027-8424, 1091-6490},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1720347115},
doi = {10.1073/pnas.1720347115},
abstract = {Word embeddings are a powerful machine-learning framework that represents each English word by a vector. The geometric relationship between these vectors captures meaningful semantic relationships between the corresponding words. In this paper, we develop a framework to demonstrate how the temporal dynamics of the embedding helps to quantify changes in stereotypes and attitudes toward women and ethnic minorities in the 20th and 21st centuries in the United States. We integrate word embeddings trained on 100 y of text data with the US Census to show that changes in the embedding track closely with demographic and occupation shifts over time. The embedding captures societal shifts—e.g., the women’s movement in the 1960s and Asian immigration into the United States—and also illuminates how specific adjectives and occupations became more closely associated with certain populations over time. Our framework for temporal analysis of word embedding opens up a fruitful intersection between machine learning and quantitative social science.},
language = {en},
number = {16},
urldate = {2021-04-07},
journal = {Proceedings of the National Academy of Sciences},
author = {Garg, Nikhil and Schiebinger, Londa and Jurafsky, Dan and Zou, James},
month = apr,
year = {2018},
pages = {E3635--E3644},
}
@article{martins_rise_2020,
title = {The rise of prosociality in fiction preceded democratic revolutions in {Early} {Modern} {Europe}},
volume = {117},
issn = {0027-8424, 1091-6490},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.2009571117},
doi = {10.1073/pnas.2009571117},
abstract = {The English and French Revolutions represent a turning point in history, marking the beginning of the modern rise of democracy. Recent advances in cultural evolution have put forward the idea that the early modern revolutions may be the product of a long-term psychological shift, from hierarchical and dominance-based interactions to democratic and trust-based relationships. In this study, we tested this hypothesis by analyzing theater plays during the early modern period in England and France. We found an increase in cooperation-related words over time relative to dominance-related words in both countries. Furthermore, we found that the accelerated rise of cooperation-related words preceded both the English Civil War (1642) and the French Revolution (1789). Finally, we found that rising per capita gross domestic product (GDPpc) generally led to an increase in cooperation-related words. These results highlight the likely role of long-term psychological and economic changes in explaining the rise of early modern democracies.},
language = {en},
number = {46},
urldate = {2021-04-07},
journal = {Proceedings of the National Academy of Sciences},
author = {Martins, Mauricio de Jesus Dias and Baumard, Nicolas},
month = nov,
year = {2020},
pages = {28684--28691},
}
@article{king_how_2013,
title = {How {Censorship} in {China} {Allows} {Government} {Criticism} but {Silences} {Collective} {Expression}},
volume = {107},
language = {en},
number = {2},
journal = {American Political Science Review},
author = {King, Gary and Pan, Jennifer and Roberts, Margaret E},
year = {2013},
pages = {326--343},
}
@article{molina_machine_2019,
title = {Machine {Learning} for {Sociology}},
volume = {45},
abstract = {Machine learning is a field at the intersection of statistics and computer science that uses algorithms to extract information and knowledge from data. Its applications increasingly find their way into economics, political science, and sociology. We offer a brief introduction to this vast toolbox and illustrate its current uses in the social sciences, including distilling measures from new data sources, such as text and images; characterizing population heterogeneity; improving causal inference; and offering predictions to aid policy decisions and theory development. We argue that, in addition to serving similar purposes in sociology, machine learning tools can speak to long-standing questions on the limitations of the linear modeling framework, the criteria for evaluating empirical findings, transparency around the context of discovery, and the epistemological core of the discipline.},
language = {en},
journal = {Annual Review of Sociology},
author = {Molina, Mario and Garip, Filiz},
year = {2019},
pages = {27--45},
}
@article{evans_machine_2016,
title = {Machine {Translation}: {Mining} {Text} for {Social} {Theory}},
volume = {42},
issn = {0360-0572, 1545-2115},
shorttitle = {Machine {Translation}},
url = {http://www.annualreviews.org/doi/10.1146/annurev-soc-081715-074206},
doi = {10.1146/annurev-soc-081715-074206},
abstract = {More of the social world lives within electronic text than ever before, from collective activity on the web, social media, and instant messaging to online transactions, government intelligence, and digitized libraries. This supply of text has elicited demand for natural language processing and machine learning tools to filter, search, and translate text into valuable data. We survey some of the most exciting computational approaches to text analysis, highlighting both supervised methods that extend old theories to new data and unsupervised techniques that discover hidden regularities worth theorizing. We then review recent research that uses these tools to develop social insight by exploring (a) collective attention and reasoning through the content of communication; (b) social relationships through the process of communication; and (c) social states, roles, and moves identified through heterogeneous signals within communication. We highlight social questions for which these advances could offer powerful new insight.},
language = {en},
number = {1},
urldate = {2021-04-07},
journal = {Annual Review of Sociology},
author = {Evans, James A. and Aceves, Pedro},
month = jul,
year = {2016},
pages = {21--50},
}
@article{king_how_2017,
title = {How the news media activate public expression and influence national agendas},
volume = {358},
language = {en},
journal = {Science},
author = {King, Gary and Schneer, Benjamin and White, Ariel},
year = {2017},
pages = {776--780},
}
@article{woodward_quantitative_1934,
title = {Quantitative {Newspaper} {Analysis} as a {Technique} of {Opinion} {Research}},
volume = {12},
language = {en},
number = {4},
journal = {Social Forces},
author = {Woodward, Julian L},
year = {1934},
pages = {526--537},
}
@article{michel_quantitative_2011,
title = {Quantitative {Analysis} of {Culture} {Using} {Millions} of {Digitized} {Books}},
volume = {331},
issn = {0036-8075, 1095-9203},
url = {https://www.sciencemag.org/lookup/doi/10.1126/science.1199644},
doi = {10.1126/science.1199644},
language = {en},
number = {6014},
urldate = {2021-04-07},
journal = {Science},
author = {Michel, J.-B. and Shen, Y. K. and Aiden, A. P. and Veres, A. and Gray, M. K. and {The Google Books Team} and Pickett, J. P. and Hoiberg, D. and Clancy, D. and Norvig, P. and Orwant, J. and Pinker, S. and Nowak, M. A. and Aiden, E. L.},
month = jan,
year = {2011},
pages = {176--182},
}
@article{denny_text_nodate,
title = {Text {Preprocessing} {For} {Unsupervised} {Learning}: {Why} {It} {Matters}, {When} {It} {Misleads}, {And} {What} {To} {Do} {About} {It}},
abstract = {Despite the popularity of unsupervised techniques for political science text-as-data research, the importance and implications of preprocessing decisions in this domain have received scant systematic attention. Yet, as we show, such decisions have profound effects on the results of real models for real data. We argue that substantive theory is typically too vague to be of use for feature selection, and that the supervised literature is not necessarily a helpful source of advice. To aid researchers working in unsupervised settings, we introduce a statistical procedure and software that examines the sensitivity of findings under alternate preprocessing regimes. This approach complements a researcher’s substantive understanding of a problem by providing a characterization of the variability changes in preprocessing choices may induce when analyzing a particular dataset. In making scholars aware of the degree to which their results are likely to be sensitive to their preprocessing decisions, it aids replication efforts.},
language = {en},
author = {Denny, Matthew J and Spirling, Arthur},
journal = {Political Analysis},
volume = {26},
number = {2},
year = {2018},
pages = {168--189},
}
@article{kozlowski_geometry_2019,
title = {The {Geometry} of {Culture}: {Analyzing} the {Meanings} of {Class} through {Word} {Embeddings}},
volume = {84},
issn = {0003-1224, 1939-8271},
shorttitle = {The {Geometry} of {Culture}},
url = {http://journals.sagepub.com/doi/10.1177/0003122419877135},
doi = {10.1177/0003122419877135},
abstract = {We argue word embedding models are a useful tool for the study of culture using a historical analysis of shared understandings of social class as an empirical case. Word embeddings represent semantic relations between words as relationships between vectors in a highdimensional space, specifying a relational model of meaning consistent with contemporary theories of culture. Dimensions induced by word differences (rich – poor) in these spaces correspond to dimensions of cultural meaning, and the projection of words onto these dimensions reflects widely shared associations, which we validate with surveys. Analyzing text from millions of books published over 100 years, we show that the markers of class continuously shifted amidst the economic transformations of the twentieth century, yet the basic cultural dimensions of class remained remarkably stable. The notable exception is education, which became tightly linked to affluence independent of its association with cultivated taste.},
language = {en},
number = {5},
urldate = {2021-04-07},
journal = {American Sociological Review},
author = {Kozlowski, Austin C. and Taddy, Matt and Evans, James A.},
month = oct,
year = {2019},
pages = {905--949},
}
@article{edelmann_computational_2020,
title = {Computational {Social} {Science} and {Sociology}},
volume = {46},
issn = {0360-0572, 1545-2115},
url = {https://www.annualreviews.org/doi/10.1146/annurev-soc-121919-054621},
doi = {10.1146/annurev-soc-121919-054621},
abstract = {The integration of social science with computer science and engineering fields has produced a new area of study: computational social science. This field applies computational methods to novel sources of digital data such as social media, administrative records, and historical archives to develop theories of human behavior. We review the evolution of this field within sociology via bibliometric analysis and in-depth analysis of the following subfields where this new work is appearing most rapidly: (a) social network analysis and group formation; (b) collective behavior and political sociology; (c) the sociology of knowledge; (d) cultural sociology, social psychology, and emotions; (e) the production of culture; ( f ) economic sociology and organizations; and (g) demography and population studies. Our review reveals that sociologists are not only at the center of cutting-edge research that addresses longstanding questions about human behavior but also developing new lines of inquiry about digital spaces as well. We conclude by discussing challenging new obstacles in the field, calling for increased attention to sociological theory, and identifying new areas where computational social science might be further integrated into mainstream sociology.},
language = {en},
number = {1},
urldate = {2021-04-07},
journal = {Annual Review of Sociology},
author = {Edelmann, Achim and Wolff, Tom and Montagne, Danielle and Bail, Christopher A.},
month = jul,
year = {2020},
pages = {61--81},
}
@article{chen_impact_2019,
title = {The {Impact} of {Media} {Censorship}: 1984 or {Brave} {New} {World}?},
volume = {109},
issn = {0002-8282},
shorttitle = {The {Impact} of {Media} {Censorship}},
url = {https://pubs.aeaweb.org/doi/10.1257/aer.20171765},
doi = {10.1257/aer.20171765},
abstract = {Media censorship is a hallmark of authoritarian regimes. We conduct a field experiment in China to measure the effects of providing citizens with access to an uncensored internet. We track subjects’ media consumption, beliefs regarding the media, economic beliefs, political attitudes, and behaviors over 18 months. We find four main results: (i) free access alone does not induce subjects to acquire politically sensitive information; (ii) temporary encouragement leads to a persistent increase in acquisition, indicating that demand is not permanently low; (iii) acquisition brings broad, substantial, and persistent changes to knowledge, beliefs, attitudes, and intended behaviors; and (iv) social transmission of information is statistically significant but small in magnitude. We calibrate a simple model to show that the combination of low demand for uncensored information and the moderate social transmission means China’s censorship apparatus may remain robust to a large number of citizens receiving access to an uncensored internet. (JEL C93, D72, D83, L82, L86, L88, P36)},
language = {en},
number = {6},
urldate = {2021-04-07},
journal = {American Economic Review},
author = {Chen, Yuyu and Yang, David Y.},
month = jun,
year = {2019},
pages = {2294--2332},
}
@article{lansdall-welfare_content_2017,
title = {Content analysis of 150 years of {British} periodicals},
volume = {114},
issn = {0027-8424, 1091-6490},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1606380114},
doi = {10.1073/pnas.1606380114},
abstract = {Previous studies have shown that it is possible to detect macroscopic patterns of cultural change over periods of centuries by analyzing large textual time series, specifically digitized books. This method promises to empower scholars with a quantitative and data-driven tool to study culture and society, but its power has been limited by the use of data from books and simple analytics based essentially on word counts. This study addresses these problems by assembling a vast corpus of regional newspapers from the United Kingdom, incorporating very fine-grained geographical and temporal information that is not available for books. The corpus spans 150 years and is formed by millions of articles, representing 14\% of all British regional outlets of the period. Simple content analysis of this corpus allowed us to detect specific events, like wars, epidemics, coronations, or conclaves, with high accuracy, whereas the use of more refined techniques from artificial intelligence enabled us to move beyond counting words by detecting references to named entities. These techniques allowed us to observe both a systematic underrepresentation and a steady increase of women in the news during the 20th century and the change of geographic focus for various concepts. We also estimate the dates when electricity overtook steam and trains overtook horses as a means of transportation, both around the year 1900, along with observing other cultural transitions. We believe that these data-driven approaches can complement the traditional method of close reading in detecting trends of continuity and change in historical corpora.},
language = {en},
number = {4},
urldate = {2021-04-07},
journal = {Proceedings of the National Academy of Sciences},
author = {Lansdall-Welfare, Thomas and Sudhahar, Saatviga and Thompson, James and Lewis, Justin and {FindMyPast Newspaper Team} and Cristianini, Nello},
month = jan,
year = {2017},
pages = {E457--E465},
}
@article{theocharis_computational_2021,
title = {Computational {Social} {Science} and the {Study} of {Political} {Communication}},
volume = {38},
issn = {1058-4609, 1091-7675},
url = {https://www.tandfonline.com/doi/full/10.1080/10584609.2020.1833121},
doi = {10.1080/10584609.2020.1833121},
abstract = {The challenge of disentangling political communication processes and their effects has grown with the complexity of the new political information environment. But so have scientists’ toolsets and capacities to better study and understand them. We map the challenges and opportunities of developing, synthesizing, and applying data collection and analysis techniques relying primarily on computational methods and tools to answer substantive theory-driven questions in the field of political communication. We foreground the theoretical, empirical, and institutional opportunities and challenges of Computational Communication Science (CCS) that are relevant to the political communication community. We also assess understandings of CCS and highlight challenges associated with data and resource requirements, as well as those connected with the theory and semantics of digital signals. With an eye to existing practices, we elaborate on the key role of infrastructures, academic institutions, ethics, and training in computational methods. Finally, we present the six full articles and two forum contributions of this special issue illustrating methodological innovation, as well as the theoretical, practical, and institutional relevance and challenges for realizing the potential of computational methods in political communication.},
language = {en},
number = {1-2},
urldate = {2021-04-07},
journal = {Political Communication},
author = {Theocharis, Yannis and Jungherr, Andreas},
month = mar,
year = {2021},
pages = {1--22},
}
@article{rodriguez_models_2021,
title = {Models for {Context}-{Specific} {Description} and {Inference} in {Political} {Science}},
url = {https://github.com/prodriguezsosa/EmbeddingRegression},
abstract = {Political scientists commonly seek to make statements about how a word’s use and meaning varies over circumstances—whether that be time, partisan identity, or some other document-level covariate. A promising avenue is the use of domain-specific word embeddings, that simultaneously allow for statements of uncertainty and statistical inference. We introduce the à la Carte on Text (ConText) embedding regression model for this purpose. We extend and validate a simple model-based linear method of refitting pre-trained embeddings to local contexts that requires minimal input data. It outperforms well-known competitors for studying changes in meaning across groups and time. Our approach allows us to speak descriptively of systematic differences across covariates in the way that words are used, and to comment on whether a particular use is statistically significantly different to another. We provide evidence of excellent relative performance of the model, and show how it might be used in substantive research.},
language = {en},
journal = {Working paper},
author = {Rodriguez, Pedro L and Spirling, Arthur and Stewart, Brandon M},
year = {2021},
pages = {1--43},
}
@article{khodak_carte_2018,
title = {A {La} {Carte} {Embedding}: {Cheap} but {Effective} {Induction} of {Semantic} {Feature} {Vectors}},
shorttitle = {A {La} {Carte} {Embedding}},
url = {http://arxiv.org/abs/1805.05388},
abstract = {Motivations like domain adaptation, transfer learning, and feature learning have fueled interest in inducing embeddings for rare or unseen words, n-grams, synsets, and other textual features. This paper introduces à la carte embedding, a simple and general alternative to the usual word2vec-based approaches for building such representations that is based upon recent theoretical results for GloVe-like embeddings. Our method relies mainly on a linear transformation that is efficiently learnable using pretrained word vectors and linear regression. This transform is applicable “on the fly” in the future when a new text feature or rare word is encountered, even if only a single usage example is available. We introduce a new dataset showing how the à la carte method requires fewer examples of words in context to learn high-quality embeddings and we obtain state-of-the-art results on a nonce task and some unsupervised document classification tasks.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:1805.05388 [cs]},
author = {Khodak, Mikhail and Saunshi, Nikunj and Liang, Yingyu and Ma, Tengyu and Stewart, Brandon and Arora, Sanjeev},
month = may,
year = {2018},
note = {arXiv: 1805.05388},
keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language},
}
@article{pennington_glove_2014,
title = {{GloVe}: {Global} {Vectors} for {Word} {Representation}},
abstract = {Recent methods for learning vector space representations of words have succeeded in capturing fine-grained semantic and syntactic regularities using vector arithmetic, but the origin of these regularities has remained opaque. We analyze and make explicit the model properties needed for such regularities to emerge in word vectors. The result is a new global logbilinear regression model that combines the advantages of the two major model families in the literature: global matrix factorization and local context window methods. Our model efficiently leverages statistical information by training only on the nonzero elements in a word-word cooccurrence matrix, rather than on the entire sparse matrix or on individual context windows in a large corpus. The model produces a vector space with meaningful substructure, as evidenced by its performance of 75\% on a recent word analogy task. It also outperforms related models on similarity tasks and named entity recognition.},
language = {en},
author = {Pennington, Jeffrey and Socher, Richard and Manning, Christopher D},
journal = {Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
year = {2014},
pages = {1532--1543},
}
@article{mikolov_efficient_2013,
title = {Efficient {Estimation} of {Word} {Representations} in {Vector} {Space}},
url = {http://arxiv.org/abs/1301.3781},
abstract = {We propose two novel model architectures for computing continuous vector representations of words from very large data sets. The quality of these representations is measured in a word similarity task, and the results are compared to the previously best performing techniques based on different types of neural networks. We observe large improvements in accuracy at much lower computational cost, i.e. it takes less than a day to learn high quality word vectors from a 1.6 billion words data set. Furthermore, we show that these vectors provide state-of-the-art performance on our test set for measuring syntactic and semantic word similarities.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:1301.3781 [cs]},
author = {Mikolov, Tomas and Chen, Kai and Corrado, Greg and Dean, Jeffrey},
month = sep,
year = {2013},
note = {arXiv: 1301.3781},
keywords = {Computer Science - Computation and Language},
}
@article{mikolov_distributed_nodate,
title = {Distributed {Representations} of {Words} and {Phrases} and their {Compositionality}},
abstract = {The recently introduced continuous Skip-gram model is an efficient method for learning high-quality distributed vector representations that capture a large number of precise syntactic and semantic word relationships. In this paper we present several extensions that improve both the quality of the vectors and the training speed. By subsampling of the frequent words we obtain significant speedup and also learn more regular word representations. We also describe a simple alternative to the hierarchical softmax called negative sampling.},
language = {en},
author = {Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado, Greg S and Dean, Jeff},
journal = {Advances in Neural Information Processing Systems},
volume = {26},
year = {2013},
pages = {3111--3119},
}
@article{ruder_survey_2019,
title = {A {Survey} of {Cross}-lingual {Word} {Embedding} {Models}},
volume = {65},
issn = {1076-9757},
url = {https://jair.org/index.php/jair/article/view/11640},
doi = {10.1613/jair.1.11640},
abstract = {Cross-lingual representations of words enable us to reason about word meaning in multilingual contexts and are a key facilitator of cross-lingual transfer when developing natural language processing models for low-resource languages. In this survey, we provide a comprehensive typology of cross-lingual word embedding models. We compare their data requirements and objective functions. The recurring theme of the survey is that many of the models presented in the literature optimize for the same objectives, and that seemingly different models are often equivalent, modulo optimization strategies, hyper-parameters, and such. We also discuss the different ways cross-lingual word embeddings are evaluated, as well as future challenges and research horizons.},
language = {en},
urldate = {2021-04-07},
journal = {Journal of Artificial Intelligence Research},
author = {Ruder, Sebastian and Vulić, Ivan and Søgaard, Anders},
month = aug,
year = {2019},
pages = {569--631},
}
@article{soliman_aravec_2017,
title = {{AraVec}: {A} set of {Arabic} {Word} {Embedding} {Models} for use in {Arabic} {NLP}},
volume = {117},
issn = {18770509},
shorttitle = {{AraVec}},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1877050917321749},
doi = {10.1016/j.procs.2017.10.117},
abstract = {Advancements in neural networks have led to developments in fields like computer vision, speech recognition and natural language processing (NLP). One of the most influential recent developments in NLP is the use of word embeddings, where words are represented as vectors in a continuous space, capturing many syntactic and semantic relations among them. AraVec is a pre-trained distributed word representation (word embedding) open source project which aims to provide the Arabic NLP research community with free to use and powerful word embedding models. The first version of AraVec provides six different word embedding models built on top of three different Arabic content domains; Tweets, World Wide Web pages and Wikipedia Arabic articles. The total number of tokens used to build the models amounts to more than 3,300,000,000. This paper describes the resources used for building the models, the employed data cleaning techniques, the carried out preprocessing step, as well as the details of the employed word embedding creation techniques.},
language = {en},
urldate = {2021-04-07},
journal = {Procedia Computer Science},
author = {Soliman, Abu Bakr and Eissa, Kareem and El-Beltagy, Samhaa R.},
year = {2017},
pages = {256--265},
}
@article{lucas_computer-assisted_2015,
title = {Computer-{Assisted} {Text} {Analysis} for {Comparative} {Politics}},
volume = {23},
issn = {1047-1987, 1476-4989},
url = {https://www.cambridge.org/core/product/identifier/S1047198700011736/type/journal_article},
doi = {10.1093/pan/mpu019},
abstract = {Recent advances in research tools for the systematic analysis of textual data are enabling exciting new research throughout the social sciences. For comparative politics, scholars who are often interested in non-English and possibly multilingual textual datasets, these advances may be difficult to access. This article discusses practical issues that arise in the processing, management, translation, and analysis of textual data with a particular focus on how procedures differ across languages. These procedures are combined in two applied examples of automated text analysis using the recently introduced Structural Topic Model. We also show how the model can be used to analyze data that have been translated into a single language via machine translation tools. All the methods we describe here are implemented in open-source software packages available from the authors.},
language = {en},
number = {2},
urldate = {2021-04-07},
journal = {Political Analysis},
author = {Lucas, Christopher and Nielsen, Richard A. and Roberts, Margaret E. and Stewart, Brandon M. and Storer, Alex and Tingley, Dustin},
year = {2015},
pages = {254--277},
}
@article{fouad_arwordvec_2020,
title = {{ArWordVec}: efficient word embedding models for {Arabic} tweets},
volume = {24},
issn = {1433-7479},
shorttitle = {{ArWordVec}},
url = {https://doi.org/10.1007/s00500-019-04153-6},
doi = {10.1007/s00500-019-04153-6},
abstract = {One of the major advances in artificial intelligence nowadays is to understand, process and utilize the humans’ natural language. This has been achieved by employing the different natural language processing (NLP) techniques along with the aid of the various deep learning approaches and architectures. Using the distributed word representations to substitute the traditional bag-of-words approach has been utilized very efficiently in the last years for many NLP tasks. In this paper, we present the detailed steps of building a set of efficient word embedding models called ArWordVec that are generated from a huge repository of Arabic tweets. In addition, a new method for measuring Arabic word similarity is introduced that has been used in evaluating the performance of the generated ArWordVec models. The experimental results show that the performance of the ArWordVec models overcomes the recently available models on Arabic Twitter data for the word similarity task. In addition, two of the large Arabic tweets datasets are used to examine the performance of the proposed models in the multi-class sentiment analysis task. The results show that the proposed models are very efficient and help in achieving a classification accuracy ratio exceeding 73.86\% with a high average F1 value of 74.15.},
language = {en},
number = {11},
urldate = {2021-04-07},
journal = {Soft Computing},
author = {Fouad, Mohammed M. and Mahany, Ahmed and Aljohani, Naif and Abbasi, Rabeeh Ayaz and Hassan, Saeed-Ul},
month = jun,
year = {2020},
pages = {8061--8068},
}
@article{vosoughi_spread_2018,
title = {The spread of true and false news online},
volume = {359},
issn = {0036-8075, 1095-9203},
url = {https://www.sciencemag.org/lookup/doi/10.1126/science.aap9559},
doi = {10.1126/science.aap9559},
language = {en},
number = {6380},
urldate = {2021-04-07},
journal = {Science},
author = {Vosoughi, Soroush and Roy, Deb and Aral, Sinan},
month = mar,
year = {2018},
pages = {1146--1151},
}
@article{del_vicario_spreading_2016,
title = {The spreading of misinformation online},
volume = {113},
issn = {0027-8424, 1091-6490},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1517441113},
doi = {10.1073/pnas.1517441113},
abstract = {The wide availability of user-provided content in online social media facilitates the aggregation of people around common interests, worldviews, and narratives. However, the World Wide Web (WWW) also allows for the rapid dissemination of unsubstantiated rumors and conspiracy theories that often elicit rapid, large, but naive social responses such as the recent case of Jade Helm 15––where a simple military exercise turned out to be perceived as the beginning of a new civil war in the United States. In this work, we address the determinants governing misinformation spreading through a thorough quantitative analysis. In particular, we focus on how Facebook users consume information related to two distinct narratives: scientific and conspiracy news. We find that, although consumers of scientific and conspiracy stories present similar consumption patterns with respect to content, cascade dynamics differ. Selective exposure to content is the primary driver of content diffusion and generates the formation of homogeneous clusters, i.e., “echo chambers.” Indeed, homogeneity appears to be the primary driver for the diffusion of contents and each echo chamber has its own cascade dynamics. Finally, we introduce a data-driven percolation model mimicking rumor spreading and we show that homogeneity and polarization are the main determinants for predicting cascades’ size.},
language = {en},
number = {3},
urldate = {2021-04-07},
journal = {Proceedings of the National Academy of Sciences},
author = {Del Vicario, Michela and Bessi, Alessandro and Zollo, Fabiana and Petroni, Fabio and Scala, Antonio and Caldarelli, Guido and Stanley, H. Eugene and Quattrociocchi, Walter},
month = jan,
year = {2016},
pages = {554--559},
}
@article{allcott_social_2016,
title = {Social {Media} and {Fake} {News} in the 2016 {Election}},
volume = {31},
language = {en},
number = {2},
journal = {Journal of Economic Perspectives},
author = {Allcott, Hunt and Gentzkow, Matthew},
year = {2017},
pages = {211--236},
}
@article{gentzkow_text_2019,
title = {Text as {Data}},
volume = {57},
issn = {0022-0515},
url = {https://pubs.aeaweb.org/doi/10.1257/jel.20181020},
doi = {10.1257/jel.20181020},
abstract = {An ever-increasing share of human interaction, communication, and culture is recorded as digital text. We provide an introduction to the use of text as an input to economic research. We discuss the features that make text different from other forms of data, offer a practical overview of relevant statistical methods, and survey a variety of applications. (JEL C38, C55, L82, Z13)},
language = {en},
number = {3},
urldate = {2021-04-07},
journal = {Journal of Economic Literature},
author = {Gentzkow, Matthew and Kelly, Bryan and Taddy, Matt},
month = sep,
year = {2019},
pages = {535--574},
}
@article{levy_social_2021,
title = {Social {Media}, {News} {Consumption}, and {Polarization}: {Evidence} from a {Field} {Experiment}},
volume = {111},
issn = {0002-8282},
shorttitle = {Social {Media}, {News} {Consumption}, and {Polarization}},
url = {https://pubs.aeaweb.org/doi/10.1257/aer.20191777},
doi = {10.1257/aer.20191777},
abstract = {Does the consumption of ideologically congruent news on social media exacerbate polarization? I estimate the effects of social media news exposure by conducting a large field experiment randomly offering participants subscriptions to conservative or liberal news outlets on Facebook. I collect data on the causal chain of media effects: subscriptions to outlets, exposure to news on Facebook, visits to online news sites, and sharing of posts, as well as changes in political opinions and attitudes. Four main findings emerge. First, random variation in exposure to news on social media substantially affects the slant of news sites that individuals visit. Second, exposure to counter-attitudinal news decreases negative attitudes toward the opposing political party. Third, in contrast to the effect on attitudes, I find no evidence that the political leanings of news outlets affect political opinions. Fourth, Facebook’s algorithm is less likely to supply individuals with posts from counter-attitudinal outlets, conditional on individuals subscribing to them. Together, the results suggest that social media algorithms may limit exposure to counter-attitudinal news and thus increase polarization. (JEL C93, D72, L82)},
language = {en},
number = {3},
urldate = {2021-04-07},
journal = {American Economic Review},
author = {Levy, Ro’ee},
month = mar,
year = {2021},
pages = {831--870},
}
@article{iyengar_scientific_2019,
title = {Scientific communication in a post-truth society},
volume = {116},
issn = {0027-8424, 1091-6490},
url = {https://www.pnas.org/content/116/16/7656},
doi = {10.1073/pnas.1805868115},
abstract = {Within the scientific community, much attention has focused on improving communications between scientists, policy makers, and the public. To date, efforts have centered on improving the content, accessibility, and delivery of scientific communications. Here we argue that in the current political and media environment faulty communication is no longer the core of the problem. Distrust in the scientific enterprise and misperceptions of scientific knowledge increasingly stem less from problems of communication and more from the widespread dissemination of misleading and biased information. We describe the profound structural shifts in the media environment that have occurred in recent decades and their connection to public policy decisions and technological changes. We explain how these shifts have enabled unscrupulous actors with ulterior motives increasingly to circulate fake news, misinformation, and disinformation with the help of trolls, bots, and respondent-driven algorithms. We document the high degree of partisan animosity, implicit ideological bias, political polarization, and politically motivated reasoning that now prevail in the public sphere and offer an actual example of how clearly stated scientific conclusions can be systematically perverted in the media through an internet-based campaign of disinformation and misinformation. We suggest that, in addition to attending to the clarity of their communications, scientists must also develop online strategies to counteract campaigns of misinformation and disinformation that will inevitably follow the release of findings threatening to partisans on either end of the political spectrum.},
language = {en},
number = {16},
urldate = {2021-04-07},
journal = {Proceedings of the National Academy of Sciences},
author = {Iyengar, Shanto and Massey, Douglas S.},
month = apr,
year = {2019},
pmid = {30478050},
note = {Publisher: National Academy of Sciences, Section: Colloquium Paper},
keywords = {bias, communication, media, politics, science},
pages = {7656--7661},
}
@article{allen_evaluating_2020,
title = {Evaluating the fake news problem at the scale of the information ecosystem},
volume = {6},
issn = {2375-2548},
url = {https://advances.sciencemag.org/content/6/14/eaay3539},
doi = {10.1126/sciadv.aay3539},
abstract = {“Fake news,” broadly defined as false or misleading information masquerading as legitimate news, is frequently asserted to be pervasive online with serious consequences for democracy. Using a unique multimode dataset that comprises a nationally representative sample of mobile, desktop, and television consumption, we refute this conventional wisdom on three levels. First, news consumption of any sort is heavily outweighed by other forms of media consumption, comprising at most 14.2\% of Americans’ daily media diets. Second, to the extent that Americans do consume news, it is overwhelmingly from television, which accounts for roughly five times as much as news consumption as online. Third, fake news comprises only 0.15\% of Americans’ daily media diet. Our results suggest that the origins of public misinformedness and polarization are more likely to lie in the content of ordinary news or the avoidance of news altogether as they are in overt fakery.
Mainstream news, mainly on television, vastly outweighs fake news, and news itself is a small fraction of U.S. media consumption.},
language = {en},
number = {14},
urldate = {2021-04-07},
journal = {Science Advances},
author = {Allen, Jennifer and Howland, Baird and Mobius, Markus and Rothschild, David and Watts, Duncan J.},
month = apr,
year = {2020},
note = {Publisher: American Association for the Advancement of Science, Section: Research Article},
pages = {eaay3539},
}
@article{rheault_word_2020,
title = {Word {Embeddings} for the {Analysis} of {Ideological} {Placement} in {Parliamentary} {Corpora}},
volume = {28},
issn = {1047-1987, 1476-4989},
url = {https://www.cambridge.org/core/journals/political-analysis/article/abs/word-embeddings-for-the-analysis-of-ideological-placement-in-parliamentary-corpora/017F0CEA9B3DB6E1B94AC36A509A8A7B},
doi = {10.1017/pan.2019.26},
abstract = {Word embeddings, the coefficients from neural network models predicting the use of words in context, have now become inescapable in applications involving natural language processing. Despite a few studies in political science, the potential of this methodology for the analysis of political texts has yet to be fully uncovered. This paper introduces models of word embeddings augmented with political metadata and trained on large-scale parliamentary corpora from Britain, Canada, and the United States. We fit these models with indicator variables of the party affiliation of members of parliament, which we refer to as party embeddings. We illustrate how these embeddings can be used to produce scaling estimates of ideological placement and other quantities of interest for political research. To validate the methodology, we assess our results against indicators from the Comparative Manifestos Project, surveys of experts, and measures based on roll-call votes. Our findings suggest that party embeddings are successful at capturing latent concepts such as ideology, and the approach provides researchers with an integrated framework for studying political language.},
language = {en},
number = {1},
urldate = {2021-04-07},
journal = {Political Analysis},
author = {Rheault, Ludovic and Cochrane, Christopher},
month = jan,
year = {2020},
note = {Publisher: Cambridge University Press},
keywords = {natural language processing, parliamentary corpora, political ideology, text as data, word embeddings},
pages = {112--133},
}
@article{rodman_timely_2020,
title = {A {Timely} {Intervention}: {Tracking} the {Changing} {Meanings} of {Political} {Concepts} with {Word} {Vectors}},
volume = {28},
issn = {1047-1987, 1476-4989},
shorttitle = {A {Timely} {Intervention}},
url = {https://www.cambridge.org/core/journals/political-analysis/article/abs/timely-intervention-tracking-the-changing-meanings-of-political-concepts-with-word-vectors/DDF3B5833A12E673EEE24FBD9798679E},
doi = {10.1017/pan.2019.23},
abstract = {Word vectorization is an emerging text-as-data method that shows great promise for automating the analysis of semantics—here, the cultural meanings of words—in large volumes of text. Yet successes with this method have largely been confined to massive corpora where the meanings of words are presumed to be fixed. In political science applications, however, many corpora are comparatively small and many interesting questions hinge on the recognition that meaning changes over time. Together, these two facts raise vexing methodological challenges. Can word vectors trace the changing cultural meanings of words in typical small corpora use cases? I test four time-sensitive implementations of word vectors (word2vec) against a gold standard developed from a modest data set of 161 years of newspaper coverage. I find that one implementation method clearly outperforms the others in matching human assessments of how public dialogues around equality in America have changed over time. In addition, I suggest best practices for using word2vec to study small corpora for time series questions, including bootstrap resampling of documents and pretraining of vectors. I close by showing that word2vec allows granular analysis of the changing meaning of words, an advance over other common text-as-data methods for semantic research questions.},
language = {en},
number = {1},
urldate = {2021-04-07},
journal = {Political Analysis},
author = {Rodman, Emma},
month = jan,
year = {2020},
note = {Publisher: Cambridge University Press},
keywords = {analysis of political speech, automated content analysis, statistical analysis of texts, time series},
pages = {87--111},
file = {Snapshot:/Users/cbarrie6/Zotero/storage/DS7AUDS6/DDF3B5833A12E673EEE24FBD9798679E.html:text/html},
}
@article{toeppe_identification_2021,
title = {Identification of {Biased} {Terms} in {News} {Articles} by {Comparison} of {Outlet}-{Specific} {Word} {Embeddings}},
volume = {12646},
url = {https://www.researchgate.net/publication/348303058},
	abstract = {Slanted news coverage, also called media bias, can heavily influence how news consumers interpret and react to the news. To automatically identify biased language, we present an exploratory approach that compares the context of related words. We train two word embedding models, one on texts of left-wing, the other on right-wing news outlets. Our hypothesis is that a word's representations in both word embedding spaces are more similar for non-biased words than biased words. The underlying idea is that the context of biased words in different news outlets varies more strongly than the one of non-biased words, since the perception of a word as being biased differs depending on its context. While we do not find statistical significance to accept the hypothesis, the results show the effectiveness of the approach. For example, after a linear mapping of both word embedding spaces, 31\% of the words with the largest distances potentially induce bias. To improve the results, we find that the dataset needs to be significantly larger, and we derive further methodology as future research direction. To our knowledge, this paper presents the first in-depth look at the context of bias words measured by word embeddings.},
language = {en},
urldate = {2021-04-07},
author = {Spinde, Timo and Rudnitckaia, Lada and Hamborg, Felix and Gipp, Bela},
editor = {Toeppe, Katharina and Yan, Hui and Chu, Samuel Kai Wah},
year = {2021},
doi = {10.1007/978-3-030-71305-8_17},
note = {Series Title: Lecture Notes in Computer Science},
pages = {215--224},
file = {Spinde et al. - 2021 - Identification of Biased Terms in News Articles by.pdf:/Users/cbarrie6/Zotero/storage/GN2EB6ZN/Spinde et al. - 2021 - Identification of Biased Terms in News Articles by.pdf:application/pdf},
}
@article{jones_stereotypical_2020,
title = {Stereotypical {Gender} {Associations} in {Language} {Have} {Decreased} {Over} {Time}},
volume = {7},
issn = {23306696},
url = {https://www.sociologicalscience.com/articles-v7-1-1/},
doi = {10.15195/v7.a1},
abstract = {Using a corpus of millions of digitized books, we document the presence and trajectory over time of stereotypical gender associations in the written English language from 1800 to 2000. We employ the novel methodology of word embeddings to quantify male gender bias: the tendency to associate a domain with the male gender. We measure male gender bias in four stereotypically gendered domains: career, family, science, and arts. We found that stereotypical gender associations in language have decreased over time but still remain, with career and science terms demonstrating positive male gender bias and family and arts terms demonstrating negative male gender bias. We also seek evidence of changing associations corresponding to the second shift and find partial support. Traditional gender ideology is latent within the text of published English-language books, yet the magnitude of traditionally gendered associations appears to be decreasing over time.},
language = {en},
urldate = {2021-04-07},
journal = {Sociological Science},
author = {Jones, Jason and Amin, Mohammad and Kim, Jessica and Skiena, Steven},
year = {2020},
pages = {1--35},
file = {Jones et al. - 2020 - Stereotypical Gender Associations in Language Have.pdf:/Users/cbarrie6/Zotero/storage/BE4CQVJI/Jones et al. - 2020 - Stereotypical Gender Associations in Language Have.pdf:application/pdf},
}
@article{stoltz_concept_2019,
title = {Concept {Mover}’s {Distance}: measuring concept engagement via word embeddings in texts},
volume = {2},
issn = {2432-2717, 2432-2725},
shorttitle = {Concept {Mover}’s {Distance}},
url = {http://link.springer.com/10.1007/s42001-019-00048-6},
doi = {10.1007/s42001-019-00048-6},
abstract = {We propose a method for measuring a text’s engagement with a focal concept using distributional representations of the meaning of words. More specifically, this measure relies on word mover’s distance, which uses word embeddings to determine similarities between two documents. In our approach, which we call Concept Mover’s Distance, a document is measured by the minimum distance the words in the document need to travel to arrive at the position of a “pseudo document” consisting of only words denoting a focal concept. This approach captures the prototypical structure of concepts, is fairly robust to pruning sparse terms as well as variation in text lengths within a corpus, and with pre-trained embeddings, can be used even when terms denoting concepts are absent from corpora and can be applied to bag-of-words datasets. We close by outlining some limitations of the proposed method as well as opportunities for future research.},
language = {en},
number = {2},
urldate = {2021-04-07},
journal = {Journal of Computational Social Science},
author = {Stoltz, Dustin S. and Taylor, Marshall A.},
month = jul,
year = {2019},
pages = {293--313},
file = {Stoltz and Taylor - 2019 - Concept Mover’s Distance measuring concept engage.pdf:/Users/cbarrie6/Zotero/storage/FI65FBMX/Stoltz and Taylor - 2019 - Concept Mover’s Distance measuring concept engage.pdf:application/pdf},
}
@article{arseniev-koehler_machine_2020,
title = {Machine learning as a model for cultural learning: {Teaching} an algorithm what it means to be fat},
shorttitle = {Machine learning as a model for cultural learning},
url = {http://arxiv.org/abs/2003.12133},
abstract = {As we navigate our cultural environment, we learn cultural biases, like those around gender, social class, health, and body weight. It is unclear, however, exactly how public culture becomes private culture. In this paper, we provide a theoretical account of such cultural learning. We propose that neural word embeddings provide a parsimonious and cognitively plausible model of the representations learned from natural language. Using neural word embeddings, we extract cultural schemata about body weight from New York Times articles. We identify several cultural schemata that link obesity to gender, immorality, poor health, and low socioeconomic class. Such schemata may be subtly but pervasively activated in public culture; thus, language can chronically reproduce biases. Our findings reinforce ongoing concerns that machine learning can also encode, and reproduce, harmful human biases.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:2003.12133 [cs]},
author = {Arseniev-Koehler, Alina and Foster, Jacob G.},
month = jun,
year = {2020},
note = {arXiv: 2003.12133},
keywords = {Computer Science - Computation and Language, Computer Science - Computers and Society, Computer Science - Machine Learning},
file = {Arseniev-Koehler and Foster - 2020 - Machine learning as a model for cultural learning.pdf:/Users/cbarrie6/Zotero/storage/AK8ZS2NK/Arseniev-Koehler and Foster - 2020 - Machine learning as a model for cultural learning.pdf:application/pdf},
}
@article{hamilton_diachronic_2018,
title = {Diachronic {Word} {Embeddings} {Reveal} {Statistical} {Laws} of {Semantic} {Change}},
url = {http://arxiv.org/abs/1605.09096},
abstract = {Understanding how words change their meanings over time is key to models of language and cultural evolution, but historical data on meaning is scarce, making theories hard to develop and test. Word embeddings show promise as a diachronic tool, but have not been carefully evaluated. We develop a robust methodology for quantifying semantic change by evaluating word embeddings (PPMI, SVD, word2vec) against known historical changes. We then use this methodology to reveal statistical laws of semantic evolution. Using six historical corpora spanning four languages and two centuries, we propose two quantitative laws of semantic change: (i) the law of conformity—the rate of semantic change scales with an inverse power-law of word frequency; (ii) the law of innovation—independent of frequency, words that are more polysemous have higher rates of semantic change.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:1605.09096 [cs]},
author = {Hamilton, William L. and Leskovec, Jure and Jurafsky, Dan},
month = oct,
year = {2018},
note = {arXiv: 1605.09096},
keywords = {Computer Science - Computation and Language},
file = {Hamilton et al. - 2018 - Diachronic Word Embeddings Reveal Statistical Laws.pdf:/Users/cbarrie6/Zotero/storage/AYCGHHUV/Hamilton et al. - 2018 - Diachronic Word Embeddings Reveal Statistical Laws.pdf:application/pdf},
}
@article{giulianelli_analysing_2020,
title = {Analysing {Lexical} {Semantic} {Change} with {Contextualised} {Word} {Representations}},
url = {http://arxiv.org/abs/2004.14118},
doi = {10.18653/v1/2020.acl-main.365},
abstract = {This paper presents the first unsupervised approach to lexical semantic change that makes use of contextualised word representations. We propose a novel method that exploits the BERT neural language model to obtain representations of word usages, clusters these representations into usage types, and measures change along time with three proposed metrics. We create a new evaluation dataset and show that the model representations and the detected semantic shifts are positively correlated with human judgements. Our extensive qualitative analysis demonstrates that our method captures a variety of synchronic and diachronic linguistic phenomena. We expect our work to inspire further research in this direction.},
language = {en},
urldate = {2021-04-07},
journal = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
author = {Giulianelli, Mario and Del Tredici, Marco and Fernández, Raquel},
year = {2020},
note = {arXiv: 2004.14118},
keywords = {Computer Science - Computation and Language, Computer Science - Computers and Society},
pages = {3960--3973},
file = {Giulianelli et al. - 2020 - Analysing Lexical Semantic Change with Contextuali.pdf:/Users/cbarrie6/Zotero/storage/QJ55QW5V/Giulianelli et al. - 2020 - Analysing Lexical Semantic Change with Contextuali.pdf:application/pdf},
}
@article{soni_abolitionist_2021,
title = {Abolitionist {Networks}: {Modeling} {Language} {Change} in {Nineteenth}-{Century} {Activist} {Newspapers}},
shorttitle = {Abolitionist {Networks}},
url = {http://arxiv.org/abs/2103.07538},
abstract = {The abolitionist movement of the nineteenth-century United States remains among the most significant social and political movements in US history. Abolitionist newspapers played a crucial role in spreading information and shaping public opinion around a range of issues relating to the abolition of slavery. These newspapers also serve as a primary source of information about the movement for scholars today, resulting in powerful new accounts of the movement and its leaders. This paper supplements recent qualitative work on the role of women in abolition’s vanguard, as well as the role of the Black press, with a quantitative text modeling approach. Using diachronic word embeddings, we identify which newspapers tended to lead lexical semantic innovations — the introduction of new usages of specific words — and which newspapers tended to follow. We then aggregate the evidence across hundreds of changes into a weighted network with the newspapers as nodes; directed edge weights represent the frequency with which each newspaper led the other in the adoption of a lexical semantic change. Analysis of this network reveals pathways of lexical semantic influence, distinguishing leaders from followers, as well as others who stood apart from the semantic changes that swept through this period. More specifically, we find that two newspapers edited by women — THE PROVINCIAL FREEMAN and THE LILY — led a large number of semantic changes in our corpus, lending additional credence to the argument that a multiracial coalition of women led the abolitionist movement in terms of both thought and action. It also contributes additional complexity to the scholarship that has sought to tease apart the relation of the abolitionist movement to the women’s suffrage movement, and the vexed racial politics that characterized their relation.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:2103.07538 [cs]},
author = {Soni, Sandeep and Klein, Lauren and Eisenstein, Jacob},
month = mar,
year = {2021},
note = {arXiv: 2103.07538},
keywords = {Computer Science - Computation and Language, Computer Science - Computers and Society, Computer Science - Digital Libraries, Computer Science - Social and Information Networks},
file = {Soni et al. - 2021 - Abolitionist Networks Modeling Language Change in.pdf:/Users/cbarrie6/Zotero/storage/4WNJYYVF/Soni et al. - 2021 - Abolitionist Networks Modeling Language Change in.pdf:application/pdf},
}
@article{kulkarni_statistically_2014,
title = {Statistically {Significant} {Detection} of {Linguistic} {Change}},
url = {http://arxiv.org/abs/1411.3315},
	abstract = {We propose a new computational approach for tracking and detecting statistically significant linguistic shifts in the meaning and usage of words. Such linguistic shifts are especially prevalent on the Internet, where the rapid exchange of ideas can quickly change a word’s meaning. Our meta-analysis approach constructs property time series of word usage, and then uses statistically sound change point detection algorithms to identify significant linguistic shifts. We consider and analyze three approaches of increasing complexity to generate such linguistic property time series, the culmination of which uses distributional characteristics inferred from word co-occurrences. Using recently proposed deep neural language models, we first train vector representations of words for each time period. Second, we warp the vector spaces into one unified coordinate system. Finally, we construct a distance-based distributional time series for each word to track its linguistic displacement over time.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:1411.3315 [cs]},
author = {Kulkarni, Vivek and Al-Rfou, Rami and Perozzi, Bryan and Skiena, Steven},
month = nov,
year = {2014},
note = {arXiv: 1411.3315},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning, Computer Science - Information Retrieval, H.3.3, I.2.6},
file = {Kulkarni et al. - 2014 - Statistically Significant Detection of Linguistic .pdf:/Users/cbarrie6/Zotero/storage/PIF9JAKI/Kulkarni et al. - 2014 - Statistically Significant Detection of Linguistic .pdf:application/pdf},
}
@inproceedings{kusner_word_2015,
title = {From {Word} {Embeddings} {To} {Document} {Distances}},
url = {http://proceedings.mlr.press/v37/kusnerb15.html},
abstract = {We present the Word Mover’s Distance (WMD), a novel distance function between text documents. Our work is based on recent results in word embeddings that learn semantically meaningful representatio...},
language = {en},
urldate = {2021-04-07},
booktitle = {International {Conference} on {Machine} {Learning}},
publisher = {PMLR},
author = {Kusner, Matt and Sun, Yu and Kolkin, Nicholas and Weinberger, Kilian},
month = jun,
year = {2015},
note = {ISSN: 1938-7228},
pages = {957--966},
file = {Full Text PDF:/Users/cbarrie6/Zotero/storage/V3JWT2YC/Kusner et al. - 2015 - From Word Embeddings To Document Distances.pdf:application/pdf;Snapshot:/Users/cbarrie6/Zotero/storage/W6KS8A7A/kusnerb15.html:text/html},
}
@article{sia_tired_2020,
title = {Tired of {Topic} {Models}? {Clusters} of {Pretrained} {Word} {Embeddings} {Make} for {Fast} and {Good} {Topics} too!},
shorttitle = {Tired of {Topic} {Models}?},
url = {http://arxiv.org/abs/2004.14914},
abstract = {Topic models are a useful analysis tool to uncover the underlying themes within document collections. The dominant approach is to use probabilistic topic models that posit a generative story, but in this paper we propose an alternative way to obtain topics: clustering pretrained word embeddings while incorporating document information for weighted clustering and reranking top words. We provide benchmarks for the combination of different word embeddings and clustering algorithms, and analyse their performance under dimensionality reduction with PCA. The best performing combination for our approach performs as well as classical topic models, but with lower runtime and computational complexity.},
language = {en},
urldate = {2021-04-07},
journal = {arXiv:2004.14914 [cs]},
author = {Sia, Suzanna and Dalmia, Ayush and Mielke, Sabrina J.},
month = oct,
year = {2020},
note = {arXiv: 2004.14914},
keywords = {Computer Science - Computation and Language},
file = {Sia et al. - 2020 - Tired of Topic Models Clusters of Pretrained Word.pdf:/Users/cbarrie6/Zotero/storage/2SW49AZI/Sia et al. - 2020 - Tired of Topic Models Clusters of Pretrained Word.pdf:application/pdf},
}
@article{mcinnes_umap_2020,
title = {{UMAP}: {Uniform} {Manifold} {Approximation} and {Projection} for {Dimension} {Reduction}},
shorttitle = {{UMAP}},
url = {http://arxiv.org/abs/1802.03426},
abstract = {UMAP (Uniform Manifold Approximation and Projection) is a novel manifold learning technique for dimension reduction. UMAP is constructed from a theoretical framework based in Riemannian geometry and algebraic topology. The result is a practical scalable algorithm that applies to real world data. The UMAP algorithm is competitive with t-SNE for visualization quality, and arguably preserves more of the global structure with superior run time performance. Furthermore, UMAP has no computational restrictions on embedding dimension, making it viable as a general purpose dimension reduction technique for machine learning.},
urldate = {2021-06-14},
journal = {arXiv:1802.03426 [cs, stat]},
author = {McInnes, Leland and Healy, John and Melville, James},
month = sep,
year = {2020},
note = {arXiv: 1802.03426},
keywords = {Computer Science - Machine Learning, Computer Science - Computational Geometry, Statistics - Machine Learning},
file = {arXiv Fulltext PDF:/Users/cbarrie6/Zotero/storage/M39GAHT4/McInnes et al. - 2020 - UMAP Uniform Manifold Approximation and Projectio.pdf:application/pdf;arXiv.org Snapshot:/Users/cbarrie6/Zotero/storage/YZ2DTERL/1802.html:text/html},
}
@article{arora_linear_2018,
title = {Linear {Algebraic} {Structure} of {Word} {Senses}, with {Applications} to {Polysemy}},
url = {http://arxiv.org/abs/1601.03764},
abstract = {Word embeddings are ubiquitous in NLP and information retrieval, but it is unclear what they represent when the word is polysemous. Here it is shown that multiple word senses reside in linear superposition within the word embedding and simple sparse coding can recover vectors that approximately capture the senses. The success of our approach, which applies to several embedding methods, is mathematically explained using a variant of the random walk on discourses model (Arora et al., 2016). A novel aspect of our technique is that each extracted word sense is accompanied by one of about 2000 “discourse atoms” that gives a succinct description of which other words co-occur with that word sense. Discourse atoms can be of independent interest, and make the method potentially more useful. Empirical tests are used to verify and support the theory.},
language = {en},
urldate = {2021-07-01},
journal = {arXiv:1601.03764 [cs, stat]},
author = {Arora, Sanjeev and Li, Yuanzhi and Liang, Yingyu and Ma, Tengyu and Risteski, Andrej},
month = dec,
year = {2018},
note = {arXiv: 1601.03764},
keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning, Statistics - Machine Learning},
file = {Arora et al. - 2018 - Linear Algebraic Structure of Word Senses, with Ap.pdf:/Users/cbarrie6/Zotero/storage/6VH2EY5Y/Arora et al. - 2018 - Linear Algebraic Structure of Word Senses, with Ap.pdf:application/pdf},
}
@article{rozado_using_2021,
title = {Using word embeddings to probe sentiment associations of politically loaded terms in news and opinion articles from news media outlets},
issn = {2432-2717, 2432-2725},
url = {https://link.springer.com/10.1007/s42001-021-00130-y},
doi = {10.1007/s42001-021-00130-y},
	abstract = {This work describes an analysis of political associations in 27 million diachronic (1975–2019) news and opinion articles from 47 news media outlets popular in the United States. We use embedding models trained on individual outlets content to quantify outlet-specific latent associations between positive/negative sentiment words and terms loaded with political connotations such as those describing political orientation, party affiliation, names of influential politicians, and ideologically aligned public figures. We observe that both left- and right-leaning news media tend to associate positive sentiment words with terms used to refer to members of their own political in-group and negative sentiment words with terms used to denote members of their ideological outgroup. Outlets rated as centrist by humans display political associations that are often milder but similar in orientation to those of left-leaning news organizations. A weighted average of political associations by outlets’ readership volume hints that political associations embedded in left of center news outlets might have larger societal reach. A chronological analysis of political associations through time suggests that political sentiment polarization is increasing in both left- and right-leaning news media contents. Our approach for measuring sentiment associations of words denoting political orientation in outlet-specific embedding models correlates substantially with external human ratings of outlet ideological bias (r {\textgreater} 0.7). Yet, specific sentiment associations are sometimes multifaceted and challenging to interpret. Overall, our work signals the potential of machine learning models derived from news media language usage to quantify the ideological bias embedded in news outlet content.},
language = {en},
urldate = {2021-07-29},
journal = {Journal of Computational Social Science},
author = {Rozado, David and al-Gharbi, Musa},
month = jul,
year = {2021},
file = {Rozado and al-Gharbi - 2021 - Using word embeddings to probe sentiment associati.pdf:/Users/cbarrie6/Zotero/storage/EH7ZA69M/Rozado and al-Gharbi - 2021 - Using word embeddings to probe sentiment associati.pdf:application/pdf},
}
@book{firth_studies_1957,
address = {London},
title = {Studies in linguistic analysis},
publisher = {Wiley-Blackwell},
author = {Firth, John Rupert},
year = {1957},
}
@article{hamoud_egypts_2019,
title = {Egypt’s {Military} {Coup} of 2013: {An} {Analysis} of the {Private} {Press} in 112 {Days}},
volume = {9},
issn = {2051-0861},
shorttitle = {Egypt’s {Military} {Coup} of 2013},
url = {https://journals.le.ac.uk/ojs1/index.php/nmes/article/view/3240},
doi = {10.29311/nmes.v9i2.3240},
	abstract = {In July 2013, a widely celebrated military coup took place in Egypt – only two and a half years after the 2011 Revolution. This article investigates the hegemonic power of the private press owned by Egypt’s business elite during the coup period. In the context of this research objective, this study answers the question of “how did Egypt’s private press provide popular support to the 2013 military coup?” through following a critical approach and conducting framing analysis of news headlines in a period of 112 days. This article examines the popular privately-owned newspapers al-Masry al-Youm and al-Watan as the main case studies. Findings show that Egypt’s business elite engaged in significant news framing in support of the 2013 military coup - for the purpose of maintaining their hegemonic position in the country. They particularly used the frame of “fear” (of the Muslim Brotherhood, potential violence and political chaos) and the frame of “promotion” by portraying the military as a saviour.},
language = {en},
number = {2},
urldate = {2021-08-24},
journal = {New Middle Eastern Studies},
author = {Hamoud, Maher},
month = aug,
year = {2019},
file = {Hamoud - 2019 - Egypt’s Military Coup of 2013 An Analysis of the .pdf:/Users/cbarrie6/Zotero/storage/CUVCTVT9/Hamoud - 2019 - Egypt’s Military Coup of 2013 An Analysis of the .pdf:application/pdf},
}
@article{abdulla_navigating_2016,
title = {Navigating the {Boundaries} {Between} {State} {Television} and {Public} {Broadcasting} in {Pre}- and {Post}-{Revolution} {Egypt}},
abstract = {This article navigates the boundaries between state and media in times of transition by presenting a case study of the Egyptian Radio and Television Union (ERTU). Government-owned television has always been used for the interest of repressive regimes in Egypt, where the boundary between public service broadcasting (PSB) and state television has been blurry. ERTU has posed itself as public service media, although its allegiance remains to the state rather than to the people. The January 25, 2011, revolution was a chance for reform, but not much has changed. This article uses personal interviews and qualitative analysis of legal documents to examine ERTU’s legal framework, funding, diversity of content, and editorial independence. It analyzes the situation in terms of the current political context and makes recommendations for turning state television into PSB.},
language = {en},
author = {Abdulla, Rasha A},
year = {2016},
pages = {20},
file = {Abdulla - 2016 - Navigating the Boundaries Between State Television.pdf:/Users/cbarrie6/Zotero/storage/UHB97CG8/Abdulla - 2016 - Navigating the Boundaries Between State Television.pdf:application/pdf},
}
@article{moore-gilbert_authoritarian_nodate,
title = {Authoritarian downgrading, (self)censorship and new media activism after the {Arab} {Spring}},
	abstract = {While considerable scholarly attention has focused on analysing the role and impact of new media during the Arab Spring uprisings of 2010–2011, comparatively little research has been devoted to examining how online activism has changed in response to the regime stabilisation measures undertaken by the governments which survived the unrest. Characterising the de-liberalisation policies of post–Arab Spring states as ‘authoritarian downgrading’, this article considers how the growing involvement of authoritarian regimes in online spaces is impacting activists’ use of new media technologies. Adopting Bahrain as a case study, we present the results of a survey of Bahraini political activists conducted in 2017 and consider whether activists’ perceptions of their online safety and security are impacting their use of new media through behaviours such as self-censorship, the adoption of pseudonyms and the preferencing of direct messaging apps over Arab Spring-era social media platforms.},
language = {en},
	journal = {New Media \& Society},
author = {Moore-Gilbert, Kylie and Abdul-Nabi, Zainab},
pages = {19},
file = {Moore-Gilbert and Abdul-Nabi - Authoritarian downgrading, (self)censorship and ne.pdf:/Users/cbarrie6/Zotero/storage/WG9EEB6L/Moore-Gilbert and Abdul-Nabi - Authoritarian downgrading, (self)censorship and ne.pdf:application/pdf},
}
@article{el_issawi_shifting_2016,
title = {Shifting journalistic roles in democratic transitions: {Lessons} from {Egypt}},
volume = {17},
issn = {1464-8849, 1741-3001},
shorttitle = {Shifting journalistic roles in democratic transitions},
url = {http://journals.sagepub.com/doi/10.1177/1464884915576732},
doi = {10.1177/1464884915576732},
abstract = {While in the case of the Arab Spring the focus of research and debate was very much on the role of social media in enabling political change both during the uprisings and in their immediate aftermath, the impact of traditional national mass media and journalism on framing this political change has been less addressed. In this article, we investigate the role of Egyptian journalists in shaping Egypt’s complex and fast-moving political transition. Based on a thematic analysis of in-depth interviews and a conceptual framework building on Christians et al.’s normative roles of the media, it can be concluded that the monitorial and facilitative roles, which were prevalent in the early stages of the post-Mubarak era, were quickly overturned in favor of a radical and collaborative role. Egyptian journalists working in private media thus demonized their political adversaries, mainly the Islamists, transforming this political ‘other’ into the ultimate enemy. At the same time, the new military regime was being revered and celebrated. This arguably contributed to further destabilize the fragile transition to democracy. It is furthermore concluded that for democracy to succeed in an Egyptian context, antagonistic political conflicts need to be transformed into agonistic ones both at the level of political culture and media culture.},
language = {en},
number = {5},
urldate = {2021-08-24},
journal = {Journalism},
author = {el Issawi, Fatima and Cammaerts, Bart},
month = jul,
year = {2016},
pages = {549--566},
file = {el Issawi and Cammaerts - 2016 - Shifting journalistic roles in democratic transiti.pdf:/Users/cbarrie6/Zotero/storage/EZYDUL38/el Issawi and Cammaerts - 2016 - Shifting journalistic roles in democratic transiti.pdf:application/pdf},
}
@article{issawi_egyptian_2020,
title = {Egyptian journalists and the struggle for change following the 2011 uprising: {The} ambiguous journalistic agency between change and conformity},
volume = {82},
issn = {1748-0485, 1748-0493},
shorttitle = {Egyptian journalists and the struggle for change following the 2011 uprising},
url = {http://journals.sagepub.com/doi/10.1177/1748048519897516},
doi = {10.1177/1748048519897516},
	abstract = {The Egyptian media displayed a high level of content diversity in the final years of the Mubarak regime, prior to the 2011 uprising. This diversity expanded considerably after the uprising when national media embodied expressions of dissent with unprecedented openness, in defiance of the entrenched identity of the journalist as the regime’s guard. This article investigates the dynamics of journalistic agency in Egyptian newsrooms in search for a new identity, investigating the challenges, hopes and trade-offs of a painful process of change. It looks at the complex interplay between these agentic dynamics and inherited structures within an uncertain and highly contested transition to democracy, which finally collapsed into a new chapter of authoritarianism. The article argues that while journalistic agency helped support trends towards democratization in media and politics in the immediate aftermath of the uprising, it also acted as a powerful platform in ‘othering’ opponents, preparing the ground for the return of autocratic practices and ultimately the fall of the democratic experiment.},
language = {en},
number = {7},
urldate = {2021-08-24},
journal = {International Communication Gazette},
	author = {el Issawi, Fatima},
month = nov,
year = {2020},
pages = {628--645},
file = {Issawi - 2020 - Egyptian journalists and the struggle for change f.pdf:/Users/cbarrie6/Zotero/storage/M9LTG27T/Issawi - 2020 - Egyptian journalists and the struggle for change f.pdf:application/pdf},
}
@article{abdulla_egypts_nodate,
	title = {{Egypt}’s {Media} in the {Midst} of {Revolution}},
language = {en},
author = {Abdulla, Rasha},
pages = {46},
file = {Abdulla - EGYPT’S MEDIA IN THE MIDST OF REVOLUTION.pdf:/Users/cbarrie6/Zotero/storage/3SLXSVAW/Abdulla - EGYPT’S MEDIA IN THE MIDST OF REVOLUTION.pdf:application/pdf},
}
@incollection{douai_mapping_2016,
address = {London},
title = {Mapping the “{Arab} {Autumn}”: {A} {Framing} {Analysis} of {CBC} and {Al}-{Nahar} {Networks}’ {Coverage} of {Egypt}’s {Military} {Coup}},
isbn = {978-1-137-58140-2 978-1-137-58141-9},
shorttitle = {Mapping the “{Arab} {Autumn}”},
url = {http://link.springer.com/10.1057/978-1-137-58141-9_8},
language = {en},
urldate = {2021-08-24},
booktitle = {Mediated {Identities} and {New} {Journalism} in the {Arab} {World}},
publisher = {Palgrave Macmillan UK},
author = {Elmasry, Mohamad H. and Auter, Philip J. and Makady, Heidi},
editor = {Douai, Aziz and Ben Moussa, Mohamed},
year = {2016},
doi = {10.1057/978-1-137-58141-9_8},
pages = {143--163},
file = {Elmasry et al. - 2016 - Mapping the “Arab Autumn” A Framing Analysis of C.pdf:/Users/cbarrie6/Zotero/storage/SLPDW6EL/Elmasry et al. - 2016 - Mapping the “Arab Autumn” A Framing Analysis of C.pdf:application/pdf},
}
@article{hafez_radically_2015,
title = {Radically {Polarized} {Publics} and the {Demise} of {Media} {Freedom} in {Egypt}},
issn = {1110-5097, 2090-7273},
url = {http://journals.openedition.org/ema/3397},
doi = {10.4000/ema.3397},
language = {en},
number = {12},
urldate = {2021-08-24},
journal = {Égypte/Monde arabe},
author = {Hafez, Kai},
month = mar,
year = {2015},
pages = {37--49},
file = {Hafez - 2015 - Radically Polarized Publics and the Demise of Medi.pdf:/Users/cbarrie6/Zotero/storage/EQ22KCEJ/Hafez - 2015 - Radically Polarized Publics and the Demise of Medi.pdf:application/pdf},
}
@article{koob_egyptian-media-under-transition_nodate,
	title = {Egyptian {Media} {Under} {Transition}},
language = {en},
author = {Koob, Marion},
pages = {94},
file = {Koob - Egyptian-Media-Under-Transition.pdf:/Users/cbarrie6/Zotero/storage/2YAXC2AW/Koob - Egyptian-Media-Under-Transition.pdf:application/pdf},
}
@article{hamdy_framing_2012,
title = {Framing the {Egyptian} {Uprising} in {Arabic} {Language} {Newspapers} and {Social} {Media}},
volume = {62},
issn = {00219916},
url = {https://academic.oup.com/joc/article/62/2/195-211/4085781},
doi = {10.1111/j.1460-2466.2012.01637.x},
language = {en},
number = {2},
urldate = {2021-08-24},
journal = {Journal of Communication},
author = {Hamdy, Naila and Gomaa, Ehab H.},
month = apr,
year = {2012},
pages = {195--211},
file = {Hamdy and Gomaa - 2012 - Framing the Egyptian Uprising in Arabic Language N.pdf:/Users/cbarrie6/Zotero/storage/H9HEUE9U/Hamdy and Gomaa - 2012 - Framing the Egyptian Uprising in Arabic Language N.pdf:application/pdf},
}
@article{radelet_rise_2015,
title = {The {Rise} of the {World}’s {Poorest} {Countries}},
volume = {26},
issn = {1086-3214},
url = {https://muse.jhu.edu/content/crossref/journals/journal_of_democracy/v026/26.4.radelet.html},
doi = {10.1353/jod.2015.0061},
language = {en},
number = {4},
urldate = {2021-08-24},
journal = {Journal of Democracy},
author = {Radelet, Steven},
year = {2015},
pages = {5--19},
file = {Radelet - 2015 - The Rise of the World’s Poorest Countries.pdf:/Users/cbarrie6/Zotero/storage/93DIHZ4F/Radelet - 2015 - The Rise of the World’s Poorest Countries.pdf:application/pdf},
}
@article{hearns-branaman_effects_2020,
title = {The effects of \textit{coups d’état} on journalists: {The} case of the 2014 {Thai} coup as both exemplary and exceptional},
volume = {47},
issn = {0129-6612, 2377-6277},
shorttitle = {The effects of \textit{coups d’état} on journalists},
url = {https://www.tandfonline.com/doi/full/10.1080/01296612.2020.1829858},
doi = {10.1080/01296612.2020.1829858},
language = {en},
number = {3-4},
urldate = {2021-08-24},
journal = {Media Asia},
author = {Hearns-Branaman, Jesse Owen},
month = oct,
year = {2020},
pages = {110--122},
file = {Hearns-Branaman - 2020 - The effects of coups d’état on journalists.pdf:/Users/cbarrie6/Zotero/storage/WXTNI6WX/Hearns-Branaman - 2020 - The effects of coups d’état on journalists.pdf:application/pdf},
}
@article{eissa_use_nodate,
title = {Use of hate speech in {Arabic} language newspapers},
language = {en},
author = {Eissa, Sarah},
pages = {156},
file = {Eissa - Use of hate speech in Arabic language newspapers.pdf:/Users/cbarrie6/Zotero/storage/U8ZBJXSP/Eissa - Use of hate speech in Arabic language newspapers.pdf:application/pdf},
}
@article{verkamp_five_nodate,
title = {Five incidents, one theme: {Twitter} spam as a weapon to drown voices of protest},
abstract = {Social networking sites, such as Twitter and Facebook, have become an impressive force in the modern world with user bases larger than many individual countries. With such influence, they have become important in the process of worldwide politics. Those seeking to be elected often use social networking accounts to promote their agendas while those opposing them may seek to either counter those views or drown them in a sea of noise. Building on previous work that analyzed a Russian event where Twitter spam was used as a vehicle to suppress political speech, we inspect five political events from 2011 and 2012: two related to China and one each from Syria, Russia, and Mexico. Each of these events revolved around popular Twitter hashtags which were inundated with spam tweets intended to overwhelm the original content.},
language = {en},
author = {Verkamp, John-Paul and Gupta, Minaxi},
pages = {7},
file = {Verkamp and Gupta - Five incidents, one theme Twitter spam as a weapo.pdf:/Users/cbarrie6/Zotero/storage/3EK6XXE9/Verkamp and Gupta - Five incidents, one theme Twitter spam as a weapo.pdf:application/pdf},
}
@article{mcmillan_how_2004,
title = {How to {Subvert} {Democracy}: {Montesinos} in {Peru}},
volume = {18},
issn = {0895-3309},
shorttitle = {How to {Subvert} {Democracy}},
url = {https://pubs.aeaweb.org/doi/10.1257/0895330042632690},
doi = {10.1257/0895330042632690},
abstract = {Which of the democratic checks and balances–opposition parties, the judiciary, a free press–is the most forceful? Peru has the full set of democratic institutions. In the 1990s, the secret-police chief Montesinos systematically undermined them all with bribes. We quantify the checks using the bribe prices. Montesinos paid a television-channel owner about 100 times what he paid a judge or a politician. One single television channel's bribe was five times larger than the total of the opposition politicians' bribes. By revealed preference, the strongest check on the government's power was the news media.},
language = {en},
number = {4},
urldate = {2021-08-24},
journal = {Journal of Economic Perspectives},
author = {McMillan, John and Zoido, Pablo},