-
Notifications
You must be signed in to change notification settings - Fork 0
/
PCM20210908_SICP_1.3.1.2_Procedures_as_Arguments_II.jl
2023 lines (1687 loc) · 80.4 KB
/
PCM20210908_SICP_1.3.1.2_Procedures_as_Arguments_II.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
### A Pluto.jl notebook ###
# v0.19.27
using Markdown
using InteractiveUtils
# ╔═╡ d31c78a4-66e9-4196-8fb6-0aba3e4ae0d7
using Statistics, Plots, LaTeXStrings
# ╔═╡ 951cce60-4e4f-11ee-379a-4385f4005380
md"
===================================================================================
#### SICP: 1.3.1.2 Procedures as Arguments II: Nonparametric Voting Model
##### file: PCM20210908\_SICP\_1.3.1.2\_Procedures\_as\_Arguments\_II.jl
##### Julia/Pluto.jl-code (1.9.3/19.27) by PCM *** 2023/11/05 ***
===================================================================================
"
# ╔═╡ dbdf0a16-71ed-4a83-99a1-f8119fb84ffe
# idiomatic Julia-code with 'Function', 'Real', 'while', '+='
#------------------------------------------------------------------
"""
    sum2(f::Function, a::Real, succ::Function, b::Real)::Real

Return the sum `f(a) + f(succ(a)) + f(succ(succ(a))) + …` for all index
values not exceeding `b` (SICP 1.3.1: a higher-order `sum` with an
explicit successor function `succ` generating the next index).

# Arguments
- `f`: term function applied to each index value
- `a`: first index value
- `succ`: successor function producing the next index value
- `b`: inclusive upper bound of the iteration
"""
function sum2(f::Function, a::Real, succ::Function, b::Real)::Real
    acc = 0                # accumulator; renamed so Base.sum is not shadowed
    while a <= b           # equivalent to !(a > b), but terminates on NaN input
        acc += f(a)
        a = succ(a)
    end # while
    return acc
end # function sum2
# ╔═╡ a76a6491-b5e9-45a2-8b14-d54cc1586e9d
md"
---
##### 1.3.1.2.1 Family of Gaussians as Components of Cognitive Choice Models
A *family* of Gaussians was proposed by [*Thurstone, L.L.* (1945)](https://doi.org/10.1007/BF02288891) and [*Ahrens, H.J. & Möbus, C.* (1968)](http://oops.uni-oldenburg.de/2729/1/PCM1968.pdf), as a component of a *cognitive stochastic* model for choice predictions. Each distribution of this family represents subjective *latent* affects related to one stimulus in a set of alternatives. The model predicts the percentage of *first* choice for each stimulus in the set of alternatives. The latent affect dispersions (= *discriminal dispersions* in Thurstone's own words) are supposed to be measured by psychological scaling methods (e.g. rating scales) to get empirical data for affect values (for e.g. sensations, attitudes, subjective utility, moral or aesthetic sentiments).
"
# ╔═╡ f3d7c861-4acb-4f9d-92e7-b1bec765c02d
gaussianDensity(x; μ=0.0, σ=1.0) = 1/(σ*sqrt(2π))*exp(-(1/2)*((x-μ)/σ)^2)
# ╔═╡ a21ce88d-141f-46d6-b7fd-af5daf70ca02
begin
    # The three Gaussian "discriminal dispersions" of the example plus one
    # uniform (maximally controversial) alternative, collected into a vector
    # so later cells can address them by stimulus index 1..4.
    gaussianDensityI(x) = gaussianDensity(x; μ=-1.0, σ=3.0)
    gaussianDensityJ(x) = gaussianDensity(x; μ=+1.0, σ=1.0)
    gaussianDensityK(x) = gaussianDensity(x; μ=+3.0, σ=2.0)
    uniformDensityL(x; a=-10, b=+10) = 1/(b - a)
    #-------------------------------------------------------
    density = Function[gaussianDensityI, gaussianDensityJ,
                       gaussianDensityK, uniformDensityL]
end # begin
# ╔═╡ 83b2d8cc-6790-4b62-a5e4-9fb308898335
# Visualization of the three discriminal dispersions f_i, f_j, f_k.
# The blue fill marks the area left of the criterion x = -0.3; the red
# vertical segment marks the density f_j at that criterion.
let x = -0.3
    # base plot: f_i filled up to x (sets title, axes, limits for the figure)
    plot(gaussianDensityI, -11.0, x, size=(700, 500), xlim=(-11.0, 11.0), ylim=(0, 0.45), line=:darkblue, fill=(0, :lightblue), title="Discriminal Dispersions of Thurstone's Choice Model", xlabel=L"Latent Affective Variable $S:\;(\mu(S_i)=-1.0)<(\mu(S_j)=+1.0)<(\mu(S_k)=+3.0)$", ylabel=L"Density $f(X)$", label=L"$f_i(X)$")
    plot!(gaussianDensityI, x, +11, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_i(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityJ, -11.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_j(X)$")
    plot!(gaussianDensityJ, x, +11, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_j(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityK, -11.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_k(X)$")
    plot!(gaussianDensityK, x, +11.0, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_k(X)$")
    #------------------------------------------------------------------------------
    # stimulus labels placed next to the corresponding curves
    annotate!([(-4.0, 0.09, text(L"Stimulus $S_i$", 12, :darkblue))])
    annotate!([(-0.7, 0.30, text(L"Stimulus $S_j$", 12, :darkblue))])
    annotate!([(+5.8, 0.14, text(L"Stimulus $S_k$", 12, :darkblue))])
    #--------------------------------------------------------------------------------
    # red criterion line of height f_j(x); f_i redrawn so its outline stays on top
    plot!([x, x], [0, gaussianDensityJ(x)], seriestype=:line, color=:red, label=L"$f_j(X=-0.3)$")
    plot!(gaussianDensityI, -11.0, x, size=(700, 500), line=:darkblue, label=L"$f_i(X)$")
end # let
# ╔═╡ 80b9a128-bd19-4851-b3f7-b699700fbd08
md"
---
###### Thurstone's Choice Model
According to Thurstone the probability
$P_i(i > j = 1, ..., k; j\neq i)$
$\;$
that a stimulus $S_i$ is a *first* choice when presented together with a set of alternatives $j = 1, ..., k; j\neq i$ is:
$P_i(i > j = 1, ..., k; j\neq i) = \int_{-\infty}^{+\infty}f_i(x)\prod_{j=1; j\neq i}^k p_j(x)\;dx$
$\;$
$\;$
$\;$
where:
$p_j(x) = \int_{-\infty}^x f_j(x)\;dx$
$\;$
$\;$
$\;$
$\;$
We'll present below an alternative model with less demanding assumptions. We call this model *Voting Choice Model*. According to this model the probability of preference mass (PoPM) is distributed across all discriminal dispersions starting from the most positive affect $x$ walking down to least positive affect till the PoPM
is exhausted.
"
# ╔═╡ 14ccb9a2-ec6e-4a2b-a2cf-f3d78deba2fc
md"
The *informal* meaning is that the strength of preference for stimulus $S_i$ at the point of affect strength $x$ is the product of the density $f_i(x)$ (vertical *red* line in the graphic above) and the probabilities $p_{j=1,...,k; j \ne i}(x)$ (marked by *blue* shaded areas left of the vertical *red* line in the above graphic).
The product $\prod_{j=1,...,k; j \ne i}^k p_j(x)$ is the probability that *all* alternatives $S_j$ of $S_i$ stimulate a *lower* affective value than $x$.
The *total* preference strength or probability $P_i(i > j = 1, ..., k; j\neq i)$ is the integral over the total range of sensations.
The *formal* meaning of $P_i(i > j = 1, ..., k; j\neq i)$ is that of a conditional *expected value*. It is the expectation for $S_i$ that the alternative set $S_{j = 1, ..., k; j\neq i}$ stimulates lower affects.
$\;$
$\mathbb E(p_j(x)|i) = P_i(i > j = 1, ..., k; j\neq i)$
$\;$
Because of its characteristic as an *expectation* we expect that the model favors stimuli with large variance. This can lead to various interesting and surprising effects as are discussed by Thurstone.
The assumptions of the model are rather demanding. There is the hypothesis that there exists a cognitive mechanism which computes for stimulus $S_i$ for *each* point of affect $x$ the preference strength of $S_i$ in relation to all alternatives. Furthermore the mechanism integrates these preference strengths not only over the *total* range of $X_i$ but also for *all* stimuli.
We'll present below an alternative model with less demanding assumptions. We call this model *Voting Choice Model*. According to this model the probability of preference mass (PoPM) is distributed across all discriminal dispersions starting from the most positive affect $x$ walking down to least positive affect till the PoPM is exhausted.
"
# ╔═╡ a441e6a2-6d46-4887-9927-85a86c9f3257
"""
    integral2(f, a, b; Δx=0.01)

Approximate the definite integral of `f` over `[a, b]` with the midpoint
rule at step width `Δx`, expressed via the higher-order summation `sum2`.
"""
function integral2(f, a, b; Δx=0.01)
    step(x) = x + Δx                                 # successor: advance one strip
    return sum2(f::Function, a + Δx/2.0, step, b) * Δx
end
# ╔═╡ fcc429c2-8e92-4a23-b374-6f9cf46067f2
uniformDensityl(x; a=-10.0, b=10.0) = 1/(b - a)
# ╔═╡ b3dc6e56-97ab-4a2a-9dfd-cbeb0a855aed
integral2(uniformDensityl, -10.0, 10.0, Δx=0.1) # ==> 1.000 test integral2
# ╔═╡ ec9f191f-8fa9-48e9-97f2-385ee4b2e218
"""
    thurstoneChoiceModel(;kS=3, a=-10.0, b=+10.0, Δx=0.001)

Numerical evaluation of Thurstone's first-choice model over the global
`density` array: for each stimulus i the preference density
f_i(x) · ∏_{j≠i} F_j(x) is built as a closure and integrated with
`integral2`. Only kS == 3 and kS == 4 are handled by the branches below.
Returns the tuple `(prefProbs, sum(prefProbs))`.

NOTE(review): keyword parameters `a` and `b` are accepted but the
integration bounds below are hard-coded to -10.0/+10.0 — confirm whether
`a`/`b` should be used instead.
"""
function thurstoneChoiceModel(;kS=3, a=-10.0, b=+10.0, Δx=0.001)
    prefProbs = zeros(Float64, kS) # initialization to 0.0
    # one closure per stimulus; length is fixed at 4 even for kS == 3,
    # so slot 4 then simply stays undefined
    preferenceDensity = Array{Function, 1}(undef, 4)
    for i in 1:kS
        for j in 1:kS
            if !(j == i)
                for k in 1:kS
                    if !((k == i) || (k == j))
                        #-----------------------------------------------
                        if kS == 3
                            preferenceDensity[i] = x -> density[i](x) * integral2(density[j], -10.0, x, Δx=Δx) * integral2(density[k], -10.0, x, Δx=Δx)
                        end # if kS == 3
                        #-----------------------------------------------
                        if kS == 4
                            for l in 1:kS
                                if !((l == i) || (l == j) || (l == k))
                                    preferenceDensity[i] = x -> density[i](x) * integral2(density[j], -10.0, x, Δx=Δx) * integral2(density[k], -10.0, x, Δx=Δx) * integral2(density[l], -10.0, x, Δx=Δx)
                                end # if
                            end # for l
                        end # if kS == 4
                        #-----------------------------------------------
                    end # if k
                end # for k
            end # if j
        end # for j
        # the closure is overwritten once per (j,k[,l]) permutation; the product
        # is permutation-invariant, so only the last assignment matters
        prefProbs[i] = integral2(preferenceDensity[i], -10, +10, Δx=Δx) # Si
    end # for i
    prefProbs, sum(prefProbs)
end # function thurstoneChoiceModel
# ╔═╡ b97475c2-771e-4f52-925d-831b0babc3d3
md"
$P(S_i > S_j, S_k) = 0.109514$
$P(S_j > S_i, S_K) = 0.14831$
$P(S_k > S_i, S_j) = 0.74062$
$\;$
"
# ╔═╡ 83f46574-345e-40a9-a14b-6ab6cbeb2f40
thurstoneChoiceModel(Δx=0.001)
# ╔═╡ d3345a8f-5abf-467f-8c7a-2ae85e150474
md"
---
###### Introduction of a Controversial Candidate $S_l$
The introduction of a *controversial* candidate $S_l$ will distract preference from a more favorable *non*controversial candidate $S_k$.
"
# ╔═╡ e8478494-dba3-4448-a0fa-f523c29a1e99
# Same visualization as above, now including the uniform (orange) density f_l
# of the controversial candidate S_l alongside the three Gaussians.
let x = -0.3
    #--------------------------------------------------------------------------------
    # base plot: uniform density f_l, filled left of the criterion x
    plot(uniformDensityL, -10.0, x, size=(700, 500), line=:orange, framestyle=:semi, fill=(0, :lightblue), label=L"$f_l(X)$")
    plot!(uniformDensityL, x, +10.0, size=(700, 500), line=:orange, label=L"$f_l(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityI, -11.0, x, size=(700, 500), xlim=(-11.0, 11.0), ylim=(0, 0.45), line=:darkblue, fill=(0, :lightblue), title="Discriminal Dispersions of Thurstone's Choice Model",xlabel=L"Latent Affective Variable $S:\;(\mu(S_i)=-1.)<(\mu(S_l)=0.)<(\mu(S_j)=1.)<(\mu(S_k)=3.)$", label=L"$f_i(X)$")
    plot!(gaussianDensityI, x, +10, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_i(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityJ, -11.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_j(X)$")
    plot!(gaussianDensityJ, x, +10, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_j(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityK, -11.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_k(X)$")
    plot!(gaussianDensityK, x, +10.0, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_k(X)$")
    #------------------------------------------------------------------------------
    annotate!([(-4.0, 0.09, text(L"Stimulus $S_i$", 12, :darkblue))])
    annotate!([(-0.7, 0.30, text(L"Stimulus $S_j$", 12, :darkblue))])
    annotate!([(+5.8, 0.14, text(L"Stimulus $S_k$", 12, :darkblue))])
    annotate!([(+8.5, 0.06, text(L"Stimulus $S_l$", 12, :darkblue))])
    #--------------------------------------------------------------------------------
    # red criterion line; f_l and f_i redrawn so their outlines stay on top
    plot!([x, x], [0, gaussianDensityJ(x)], seriestype=:line, color=:red, label=L"$f_j(X=-0.3)$")
    plot!(uniformDensityL, -10.0, x, size=(700, 500), line=:orange, label=L"$f_l(X)$")
    plot!(gaussianDensityI, -11.0, x, size=(700, 500), line=:darkblue, label=L"$f_i(X)$")
end # let
# ╔═╡ c9242575-0d24-4108-9bd5-3590ad4d3c45
md"
Facit: The introduction of a *controversial* candidate $S_l$ will distract preference from the most favorable but *less* controversial candidate $S_k$:
$\;$
$P(S_i > S_j, S_k) = 0.109514 > P(S_i > S_j, S_k, S_l) = 0.0742588$
$P(S_j > S_i, S_K) = 0.14831 > P(S_j > S_i, S_k, S_l) = 0.0870487$
$P(S_k > S_i, S_j) = 0.74062 >> P(S_k > S_i, S_j, S_l) = 0.507737$
$P(S_l > S_i, S_j, S_k) = 0.329387$
$\;$
"
# ╔═╡ 0aeebc71-81a5-43d8-a698-b2cfb84d53c0
thurstoneChoiceModel(kS=4, Δx=0.001)
# ╔═╡ aeaf21e4-c7c2-4b95-a082-68c0e20c2ff8
md"
---
###### Our Voting Choice Model
The *Voting Choice Model* avoids the cognitively implausible product of densities and probabilities. Instead it assumes that on an *individual* level each person possesses a certain amount of preference or *voting* mass which will be distributed across the *latent affective* discriminal dispersions till this affective mass is exhausted (*blue* shaded areas to the right of the vertical *red* line). This distribution process starts at $x = +\infty$. Then the $x$ moves to lower values down to the point $x_{crit}$ (*red* vertical line in the graphic below) where the sum of the $p_j(x)$ is $1.0$:
$\;$
$\sum_{j=1}^k p_j(x_{crit}) = \sum_{j=1}^k \int_{-\infty}^{x_{crit}} f_j(x)\;dx = 1.0$
$\;$
$\;$
$\;$
$\;$
"
# ╔═╡ 1b53baf1-376b-44b6-9718-48385a68fd3c
# Visualization for the Voting Choice Model: here the UPPER tails (right of
# the criterion x = 2.015) are filled, since voting mass is distributed from
# the most positive affect downward until the tail masses sum to 1.0.
let x = 2.015
    #--------------------------------------------------------------------------------
    plot(gaussianDensityK, -10.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_k(X)$")
    plot!(gaussianDensityK, x, 10.0, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_k(X)$")
    #--------------------------------------------------------------------------------
    # this call also sets title, axis limits, and axis labels for the figure
    plot!(gaussianDensityI, -10.0, x, size=(700, 500), xlim=(-10.0, 10.0), ylim=(0, 0.45), line=:darkblue, title="Discriminal Dispersions of Voting Choice model", xlabel=L"Latent Affective Variable $X$", ylabel=L"Density $f(X)$", label=L"$f_i(X)$")
    plot!(gaussianDensityI, x, 10.0, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_i(X)$")
    #--------------------------------------------------------------------------------
    plot!(gaussianDensityJ, -10.0, x, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_j(X)$")
    plot!(gaussianDensityJ, x, 10.0, size=(700, 500), line=:darkblue, framestyle=:semi, fill=(0, :lightblue), label=L"$f_j(X)$")
    #--------------------------------------------------------------------------------
    # redraw curve outlines over the fills
    plot!(gaussianDensityI, x, 10.0, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_i(X)$")
    plot!(gaussianDensityK, x, 10.0, size=(700, 500), line=:darkblue, framestyle=:semi, label=L"$f_k(X)$")
    #------------------------------------------------------------------------------
    annotate!([(-4.8, 0.08, text(L"Stimulus $S_i$", 12, :darkblue))])
    annotate!([(-0.7, 0.30, text(L"Stimulus $S_j$", 12, :darkblue))])
    annotate!([(+5.8, 0.14, text(L"Stimulus $S_k$", 12, :darkblue))])
    #--------------------------------------------------------------------------------
    plot!([x, x], [0, gaussianDensityJ(x)], seriestype=:line, color=:red, label=L"$f_j(x=-2.015)$")
    #--------------------------------------------------------------------------------
end # let
# ╔═╡ 3947ae42-c5e2-4893-8793-4314f1485b5a
"""
    votingChoiceModel(;kS=3, b=+10.0, Δx=0.001)

Voting choice model: sweep a criterion downward from the top of the affect
scale `b`; at each step assign stimulus i the upper-tail mass
∫ₓᵇ fᵢ(t) dt (computed with `integral2` over the global `density` array).
Stop as soon as the tail masses of all `kS` stimuli together exceed 1.0.
Returns `(votingProb, sum(votingProb))`.
"""
function votingChoiceModel(;kS=3, b=+10.0, Δx=0.001)
    votingProb = zeros(Float64, kS)
    xCrit = b
    while sum(votingProb) <= 1.0          # same condition as !(1.0 < sum(...))
        for stim in 1:kS
            votingProb[stim] = integral2(density[stim], xCrit, b, Δx=Δx)
        end # for stim
        xCrit -= Δx                       # move the criterion one step down
    end # while
    return votingProb, sum(votingProb)
end # function votingChoiceModel
# ╔═╡ 04efd6b7-7d4f-4c80-b30f-3ac1b55e9bc1
votingChoiceModel()
# ╔═╡ 8a1c25a5-e091-49f7-8455-cc90b769a4d8
md"
Both models show strong agreement in predicting first choices at least for this example. The *Pearson* product-moment correlation coefficient (Nazarathy & Klok, 2021, p.123) is near 1.00 (!). Empirical Studies have to demonstrate what model is more useful: The simpler Voting Model or the more demanding Thurstone model.
"
# ╔═╡ be0ac072-efd0-4e41-a805-dce10550cca1
"""
    modelComparison(title, x, y)

Scatter the two models' first-choice predictions against each other
(`x` = Thurstone model, `y` = Voting model), draw the identity line of
perfect agreement, and annotate the Pearson correlation (4 digits).
"""
function modelComparison(title, x, y)
    let rxy = trunc(Statistics.cor(x, y), digits=4)
        xs = [0, 1]; ys = [0, 1]        # endpoints of the identity line
        #---------------------------------------------------------------------------
        plot(x, y, xlims=(0.0, 1.0), ylims=(0.0, 1.0), title=title, seriestype=:scatter, xlabel="Thurstone Model", ylabel="Voting Model", label="model predictions")
        plot!(xs, ys, label="line of perfect model agreement")
        #---------------------------------------------------------------------------
        annotate!([(0.7, 0.1, "r(model1, model2) = $rxy")])
    end # let
end # function modelComparison
# ╔═╡ 4e22fd91-107a-4421-8f4d-bf12871cf4ef
# correlation scatter of both models' predictions, 3-stimulus set
modelComparison("Voting vs. Thurstone's Model (3 Stimuli)",
[0.109514, 0.14831, 0.74062], # Thurstone's model predictions
[0.157245, 0.154815, 0.688408]) # voting model predictions
# ╔═╡ 0b6e1719-d88b-4d17-a7b8-57b6942a07d6
# correlation scatter of both models' predictions, 4-stimulus set (incl. S_l)
modelComparison("Voting vs. Thurstone's Model (4 Stimuli)",
[0.0742588, 0.0870487, 0.507737, 0.329387], # Thurstone's model predictions
[0.0966776, 0.0287166, 0.519706, 0.355]) # voting model predictions
# ╔═╡ 40bfbb32-b766-4d45-89e1-6701495cd098
md"
---
##### 1.3.1.2.2 Application of *Nonparametric* Voting Model to Thurstone's 1st Artificial Dataset
We apply here the *nonparametric* version of our *voting model* to Thurstone first artificial data set. Thurstone describes his dataset as: *Here we* (Thurstone) *have chosen arbitrary bimodal and skewed distributions to illustrate the latitude of the method*. (Thurstone, 1945, p.245)
"
# ╔═╡ b14d83b4-0dc3-4893-a645-fd69d9fb15e6
begin # Thurstone, 1945, p.245, Table 1
    # 9 rating categories (rows) × 3 stimuli (columns)
    x1 = Real[0.04 0.03 0.00;
              0.16 0.13 0.02;
              0.13 0.18 0.14;
              0.11 0.17 0.34;
              0.08 0.15 0.34;
              0.06 0.12 0.14;
              0.11 0.10 0.02;
              0.19 0.08 0.00;
              0.12 0.04 0.00]
    size(x1)
end # begin
# ╔═╡ f2f78b7e-6a68-4fa3-846d-95bb979794c2
"""
    plotRatingDistributions1(x::Array, title; maxY=0.4)

Plot the rating distributions of 3 or 4 stimuli: one scatter + line series
per column of `x` (rows = rating categories, columns = stimuli).

NOTE(review): the first three series are plotted identically in both the
`nStims == 3` and `nStims == 4` branches — the branches differ only in the
extra orange S4 series; consider factoring out the shared part.
"""
function plotRatingDistributions1(x::Array, title; maxY=0.4)
    (nRows, nCols) = size(x)
    nCats = nRows # number of rating categories
    nStims = nCols # number of stimuli
    xs = 1:1:nCats
    #----------------------------------------------------------------------------
    # first series also sets title, axis limits, ticks, and axis labels
    plot(xs, x[:, 1], title=title, xlimits=(0.5, nCats+.5), xticks=:1:1:nCats, ylimits=(-0.05, maxY), seriestype=:scatter, colour=:blue, label="S1", xlabel=L"positive affect rating category $m$", ylabel = L"P(m)")
    if nStims == 3
        #------------------------------------------------------------------------
        plot!(xs, x[:, 2], seriestype=:scatter, colour=:red, label="S2")
        plot!(xs, x[:, 3], seriestype=:scatter, colour=:green, label="S3")
        #------------------------------------------------------------------------
        plot!(xs, x[:, 1], seriestype=:line, colour=:blue, label="S1")
        plot!(xs, x[:, 2], seriestype=:line, colour=:red, label="S2")
        plot!(xs, x[:, 3], seriestype=:line, colour=:green, label="S3")
        #------------------------------------------------------------------------
    elseif nStims == 4
        #------------------------------------------------------------------------
        plot!(xs, x[:, 2], seriestype=:scatter, colour=:red, label="S2")
        plot!(xs, x[:, 3], seriestype=:scatter, colour=:green, label="S3")
        #------------------------------------------------------------------------
        plot!(xs, x[:, 1], seriestype=:line, colour=:blue, label="S1")
        plot!(xs, x[:, 2], seriestype=:line, colour=:red, label="S2")
        plot!(xs, x[:, 3], seriestype=:line, colour=:green, label="S3")
        #------------------------------------------------------------------------
        plot!(xs, x[:, 4], seriestype=:scatter, colour=:orange, label="S4")
        plot!(xs, x[:, 4], seriestype=:line, colour=:orange, label="S4")
    end # if
    #----------------------------------------------------------------------------
end # function plotRatingDistributions1
# ╔═╡ f5aabcd1-58af-418d-82f1-f25bfa171b3f
plotRatingDistributions1(x1,"Thurstone's 1st Ex.(p.245, tab.1, without Controv. S4) ")
# ╔═╡ 3832f1e1-ee46-4a64-9d56-d9698ffc5b20
"""
    discreteVotingModel(x::Array, title; plimit=1.0)

Nonparametric voting choice model on rated data: starting at the most
positive rating category (the LAST row of `x`), accumulate each stimulus'
rating probability row by row downward until the total accumulated mass
exceeds `plimit` (or all rows are consumed). The accumulated masses are
then normalized to probabilities.

# Arguments
- `x`: nCats × nStims matrix of rating probabilities (rows = rating
  categories in ascending order of positive affect, columns = stimuli)
- `title`: label passed through unchanged into the result tuple
- `plimit`: probability mass to distribute (default 1.0)

Returns `(title, mcrit, probs, total)` where `mcrit` is the category index
bookkeeping value of the original formulation (two below the last row
added), `probs` the normalized first-choice probabilities, and `total`
their sum (1.0 up to rounding).
"""
function discreteVotingModel(x::Array, title; plimit=1.0)
    let (nRows, nCols) = size(x)
        m = nRows                         # walk categories from the top (most positive) down
        k = nCols                         # number of stimuli
        sumOfProbs = zeros(Float64, k)
        while !(sum(sumOfProbs) > plimit || m < 1)
            for j in 1:k
                sumOfProbs[j] += x[m, j]
            end # for j
            m -= 1
        end # while
        total = sum(sumOfProbs)           # hoisted: was recomputed for every element inside map
        sumOfProbs ./= total              # normalization of probs (in place, no shadowing of `x`)
        title, m-1, sumOfProbs, sum(sumOfProbs)
    end # let
end # function discreteVotingModel
# ╔═╡ 71d9d34c-c674-4168-9ecb-52a42c45684d
discreteVotingModel(x1, "Voting Model: Thurstone's 1st data set")
# ╔═╡ e9c53501-7ba2-4393-bdee-f609e371a5ac
"""
    modelComparison1(title, x, y; xlabel="VotingModel", ylabel="Thurstone Model")

Scatter two prediction vectors against each other with one colored marker
per stimulus (S1..S4), draw the identity line of perfect agreement, and
annotate the Pearson correlation (3 digits). The S4 marker is added only
when the vectors have 4 entries.
"""
function modelComparison1(title, x, y ; xlabel="VotingModel", ylabel="Thurstone Model")
    let rxy = trunc(Statistics.cor(x, y), digits=3)
        xs = [0, 1]; ys = [0, 1]        # endpoints of the identity line
        #---------------------------------------------------------------------------
        plot(xs, ys, xlims=(-0.01, 1.0), ylims=(-0.01, 1.0), title=title, seriestype=:line, colour=:red, xlabel=xlabel, ylabel=ylabel, label="line of perfect model agreement")
        #---------------------------------------------------------------------------
        # one marker per stimulus so points can be identified in the plot
        plot!((x[1], y[1]), seriestype=:scatter, colour=:blue, label="S1")
        plot!((x[2], y[2]), seriestype=:scatter, colour=:red, label="S2")
        plot!((x[3], y[3]), seriestype=:scatter, colour=:green, label="S3")
        if length(x) == 4
            plot!((x[4], y[4]), seriestype=:scatter, colour=:orange, label="S4")
        end # if
        #---------------------------------------------------------------------------
        annotate!([(0.7, 0.1, "r(model_1, model_2) = $rxy")])
    end # let
end # function modelComparison1
# ╔═╡ 403c8660-75cb-4866-9a70-e822b425c797
# model agreement on Thurstone's 1st data set (x = voting, y = Thurstone; default axis labels)
modelComparison1("Thurstone vs Voting Model",
[0.36129, 0.316129, 0.322581], # predictions of Voting model
[0.47, 0.30, 0.23]) # predictions of Thurstone's model (Thurstone, 1945, p.247)
# ╔═╡ 1261f045-99d1-4580-a926-6bba736ec1fe
md"
Though we have a high positive correlation between the predictions of Thurstone's and our voting model they are not perfect. This seems due to the fact that Thurstone's model predictions are more *discriminative* between the stimuli than our voting model.
The reason for this is that Thurstone's model favours stimuli which are *controversial*; that is, stimuli with wide (e.g. *uniform*) discriminal dispersions.
A test for the cognitive validity of either model cannot be made here because Thurstone's 'data' are only artificial and not behavioral. The empirical test has to wait until we study our own data set (Ahrens & Möbus, 1968).
"
# ╔═╡ 4460796f-06bc-4cb2-9121-c1ff56dca37c
md"
---
###### Thurstone's 1st Data Set augmented with a *Controversial* Stimulus
(with Uniform Ratings)
"
# ╔═╡ 28e1cf28-49f4-4488-946c-0c4fcd0689d1
begin
    # Thurstone's 1st data set augmented by a controversial stimulus S4
    # with a uniform rating distribution (1/9 in each of the 9 categories)
    x2 = Real[0.04 0.03 0.00 1/9;
              0.16 0.13 0.02 1/9;
              0.13 0.18 0.14 1/9;
              0.11 0.17 0.34 1/9;
              0.08 0.15 0.34 1/9;
              0.06 0.12 0.14 1/9;
              0.11 0.10 0.02 1/9;
              0.19 0.08 0.00 1/9;
              0.12 0.04 0.00 1/9]
    size(x2)
end # begin
# ╔═╡ 3b5f6d28-7fde-4492-aa10-da11539a1b77
# rating distributions and voting-model predictions for the augmented (4-stimulus) set
plotRatingDistributions1(x2, "Thurstone's 1st Ex.(p.245, tab.1, incl. Controv. S4)")
# ╔═╡ 9d3e5de5-d489-4bea-af40-d55d3ce076fd
discreteVotingModel(x2, "Voting Model: Thurstone's 1st augmented data set")
# ╔═╡ 23cb6297-23b8-4cd2-8af4-529e25511f65
md"
The introduction of a maximally *controversial* stimulus distracts preference mass mainly from the most *non*controversial stimulus $S_3$:
$\;$
$P(S_1 > S_2, S_3) = 0.36129 > P(S_1 > S_2, S_3, S_4) = 0.336973$
$P(S_2 > S_1, S_3) = 0.316129 > P(S_2 > S_1, S_3, S_4) = 0.23869$
$P(S_3 > S_1, S_2) = 0.322581 >> P(S_3 > S_1, S_2, S_4) = 0.112324$
$P(S_4 > S_1, S_2, S_3) = 0.312012$
$\;$
Stimulus $S_3$ is the main loser when the *controversial* stimulus $S_4$ is introduced into the set of alternatives. This can be seen in the correlational diagram of $modelComparison1$ (below).
$\;$
"
# ╔═╡ bac5626f-f5a5-4077-a42a-db1b768754c5
# voting-model predictions without S4 (x) vs. with S4 (y); the S4 entries
# themselves are commented out so only S1..S3 are compared.
# NOTE(review): the x data are the 3-stimulus ("S3") predictions but
# xlabel says "for S4" (and vice versa) — the axis labels look swapped; verify.
modelComparison1("Voting Model: S3 vs S4",
[0.36129, 0.316129, 0.322581], #, 0.0000], # voting model for S3
[0.336973, 0.23869, 0.112324], #, 0.31201]) # voting model for S4
xlabel="Voting Model for S4",
ylabel="Voting Model for S3")
# ╔═╡ 45815157-d41f-4c1a-b08d-8a511aacc420
md"
---
##### 1.3.1.2.3 Nonparametric Voting Model: *Thurstone*'s 2nd Numerical Example
Now, we apply the *nonparametric* version of our voting model to Thurstone's second set of artificial data. He motivates the characteristics of his dataset as: *... we have a numerical example of the theorem that when two psychological objects are tied in average popularity, as measured by the mean scale positions $S_i$ and $S_j$, then the more variable of them can win election for first choice by the introduction of a third competing object of lower average popularity. Here we used 24 successive intervals. All three of these affective distributions were made Gaussian, and it is here assumed that the distributions are at least roughly symmetric. The first two candidates are the leading ones that are tied. The third candidate has a lower average popularity* ...(Thurstone, 1945, p.247)
"
# ╔═╡ f447a7ca-ec7d-4584-ad15-1ee482186e61
begin
    # Thurstone's 2nd data set (1945, Table 2): 24 rating categories (rows)
    # × 3 stimuli (columns); S1 and S2 are tied in mean, S1 is more variable
    x31 = Real[0.00 0.00 0.00;
               0.01 0.00 0.00;
               0.00 0.00 0.00;
               0.01 0.00 0.01;
               0.02 0.00 0.01;
               0.03 0.00 0.05;
               0.04 0.01 0.09;
               0.05 0.01 0.15;
               0.07 0.05 0.19;
               0.08 0.09 0.19;
               0.09 0.15 0.15;
               0.10 0.19 0.09;
               0.10 0.19 0.05;
               0.09 0.15 0.01;
               0.08 0.09 0.01;
               0.07 0.06 0.00;
               0.05 0.01 0.00;
               0.04 0.01 0.00;
               0.03 0.00 0.00;
               0.02 0.00 0.00;
               0.01 0.00 0.00;
               0.00 0.00 0.00;
               0.01 0.00 0.00;
               0.00 0.00 0.00]
    size(x31)
end # begin
# ╔═╡ 8a3b8b4d-e92f-437e-bc28-245c72aaad88
# rating distributions and voting-model predictions for Thurstone's 2nd data set
plotRatingDistributions1(x31, "Thurstone's 2nd Example (Tab 2; without Controv. S4)", maxY=0.25)
# ╔═╡ 593f000d-0e11-4a08-a7b4-e737dc3bda2b
discreteVotingModel(x31, "Thurstone's 2nd Example (Tab 2; without Controv. S4)")
# ╔═╡ 1f445771-32a8-41d8-8faf-519fdab8ac93
# model agreement on Thurstone's 2nd data set (3 stimuli)
modelComparison1("Voting vs Thurstone Mod (Thurst.'s 2nd data no S4)",
[.48, .45, .07], # Thurstone Model (Thurstone, 1945, p.247)
[0.462963, 0.472222, 0.0648148], # Voting Model
xlabel="Thurstone Model",
ylabel="Voting Model")
# ╔═╡ c4898b15-d8d8-4201-bfbe-bf16fbc7ecaf
begin
    # Thurstone's 2nd data set augmented by a controversial stimulus S4 with
    # a uniform rating distribution: 1/24 in each of the 24 categories.
    # FIX: row 17 of column 4 was 1/14 in the original cell — a data-entry
    # typo, since the uniform distribution requires 1/24 in every row.
    x32 = Real[0.00 0.00 0.00 1/24;
               0.01 0.00 0.00 1/24;
               0.00 0.00 0.00 1/24;
               0.01 0.00 0.01 1/24;
               0.02 0.00 0.01 1/24;
               0.03 0.00 0.05 1/24;
               0.04 0.01 0.09 1/24;
               0.05 0.01 0.15 1/24;
               0.07 0.05 0.19 1/24;
               0.08 0.09 0.19 1/24;
               0.09 0.15 0.15 1/24;
               0.10 0.19 0.09 1/24;
               0.10 0.19 0.05 1/24;
               0.09 0.15 0.01 1/24;
               0.08 0.09 0.01 1/24;
               0.07 0.06 0.00 1/24;
               0.05 0.01 0.00 1/24;
               0.04 0.01 0.00 1/24;
               0.03 0.00 0.00 1/24;
               0.02 0.00 0.00 1/24;
               0.01 0.00 0.00 1/24;
               0.00 0.00 0.00 1/24;
               0.01 0.00 0.00 1/24;
               0.00 0.00 0.00 1/24]
    size(x32)
end # begin
# ╔═╡ 160981ae-22a9-4ae9-a7a3-935e0a4b39fe
# rating distributions and voting-model predictions for the augmented 2nd data set
plotRatingDistributions1(x32, "Thurstone's 2nd Example (Tab 2; incl. Controv. S4)", maxY=0.25)
# ╔═╡ a10bd04c-265f-4b78-9f24-cdbb73e47020
discreteVotingModel(x32, "Thurstone's 2nd Example (Tab 2; incl. Controv. S4)")
# ╔═╡ 0a3a119b-211a-4dc4-bc6f-e7860b81e050
# model agreement on Thurstone's 2nd data set incl. S4; the S4 entries
# themselves are commented out so only S1..S3 are compared
modelComparison1("Voting vs Thurstone Mod (Thurst.'s 2nd data incl S4)",
[.48, .45, .07], #, 00], # Thurstone Model
[0.325708, 0.260566, 0.0162854], #, 0.397441], # Voting Model
xlabel="Thurstone Model",
ylabel="Voting Model")
# ╔═╡ 85331d45-5442-4b01-9c84-5105b7a0ab53
md"
**Summary**: Applied to Thurstone's own published demo data *both* models *agree* perfectly. This coincidence is deteriorated when studying empirical data (below).
"
# ╔═╡ 9babe800-837a-4d2b-9d59-7ed2d4419719
md"
---
##### 1.3.1.2.4 Nonparametric Voting Model: Attitude and 1st Choice Data
(Ahrens & Möbus, 1968)
"
# ╔═╡ bcf2c87b-64ed-4aee-a666-b96dd65a5373
md"
---
##### Empirical Validity of Thurstone Model
(Ahrens & Möbus, 1968)
"
# ╔═╡ 646d5dc9-22ea-40f7-a3d0-d5870e9106e0
md"
Best predictors when using the Thurstone model are *Sincerity/Honesty* ($r=.944$), *Liberality* ($r=.922$), and *Objectivity* ($r=.868$).
"
# ╔═╡ 06e7e9c9-123c-440f-b702-55aca1efdc73
md"
---
##### Empirical Validity of Voting Model
"
# ╔═╡ 431f0ea9-d09e-4983-978f-5791daf853d0
md"
Best predictors when using the Voting model are *Civil Courage* ($r=.647$) and to a lesser degree *Intelligence* ($r=.424$). Both models seem to make different prediction when using correlations as a judgmental basis.
"
# ╔═╡ f5530e6d-179c-4153-8009-d7713d162c03
md"
---
##### Model Predictions for Thurstone and Voting Model
(Data are obtained from Ahrens & Möbus, 1968, p.558, Tab. 3)
###### 1. Civil Courage Ratings
"
# ╔═╡ 4b9757c8-88e4-4b3a-a435-a647431e713f
begin # Civil Courage; Ahrens & Möbus, 1968, p.558, table 3
    # empirical rating distributions: 7 rating categories (rows) × 6 stimuli
    # (columns); each column holds one stimulus' distribution over the categories
    S1 = Array{Real, 2}(undef, (7, 6))
    S1[:, 1] = [.00, .00, .10, .14, .29, .43, .05] # column 1
    S1[:, 2] = [.00, .00, .00, .05, .19, .48, .29] # column 2
    S1[:, 3] = [.00, .00, .05, .00, .33, .52, .10] # ...
    S1[:, 4] = [.00, .00, .10, .10, .19, .38, .24] # ...
    S1[:, 5] = [.00, .00, .00, .05, .57, .33, .05] # ...
    S1[:, 6] = [.00, .05, .05, .19, .43, .29, .00] # column 6
end
# ╔═╡ 974842e6-2257-4e0f-b8c6-efe1ade14ec5
function plotRatingDistributions2(x::Array, title::String)
    #= Plot the discrete rating distributions of six stimuli.
       `x` holds one distribution per column (rows = rating categories
       m = 1..nCats, columns = stimuli S1..S6); points are drawn as
       scatter markers connected by lines of the same colour.
       Fixes: removes the unused local `nStims` (now derived from size(x))
       and suppresses the duplicated legend entries that the connecting
       lines produced (scatter and line were both labelled "Sj"). =#
    let (nCats, nStims) = size(x)
        xs = 1:1:nCats                  # rating-category positions m
        colours = [:aquamarine, :red, :green, :violet, :orange, :blue]
        #----------------------------------------------------------------------------
        # empty canvas carrying the shared layout; series are added below
        plot(title=title, xlimits=(0, 8), ylimits=(-0.05, 0.60),
            xlabel=L"positive affect ratings $m$", ylabel=L"P(m)")
        #----------------------------------------------------------------------------
        for j in 1:nStims
            # scatter points carry the legend entry for stimulus j ...
            plot!(xs, x[:, j], seriestype=:scatter, colour=colours[j], label="S$j")
        end
        for j in 1:nStims
            # ... the connecting line reuses the colour but adds no legend entry
            plot!(xs, x[:, j], seriestype=:line, colour=colours[j], label="")
        end
        #----------------------------------------------------------------------------
        current()                       # return the assembled plot object
    end # let
end # function plotRatingDistributions2
# ╔═╡ abd1374a-8528-4838-ba5c-924e4cfa557d
# scatter/line plot of the six civil-courage rating distributions in S1
plotRatingDistributions2(S1, "Ratings: Civil Courage")
# ╔═╡ c54b5f99-4a0c-4c60-9a04-d01dc5a6664a
# nonparametric voting-model predictions computed from the S1 ratings
discreteVotingModel(S1, "Civil Courage Ratings")
# ╔═╡ 380be5b0-7d95-42f4-ac92-98d151b3851a
function modelComparison2(title, x, y ;xlabel="Voting Model", ylabel="Thurstone Model")
    # Scatter the per-stimulus predictions of two models against each other
    # (one point per stimulus S1..S6), draw the identity line ("perfect
    # model agreement") and annotate the product-moment correlation r(x, y),
    # truncated to three decimals.
    let rxy = trunc(Statistics.cor(x, y), digits=3)
        diagonal = [0, 1]               # identity line endpoints
        #---------------------------------------------------------------------------
        plot(diagonal, diagonal, xlims=(-0.01, 1.0), ylims=(-0.01, 1.0),
            title=title, seriestype=:line, colour=:red,
            xlabel=xlabel, ylabel=ylabel,
            label="line of perfect model agreement")
        #---------------------------------------------------------------------------
        stimColours = [:aquamarine, :red, :green, :violet, :orange, :blue]
        for (k, colour) in enumerate(stimColours)
            # one point per stimulus, same colour coding as the rating plots
            plot!((x[k], y[k]), seriestype=:scatter, colour=colour, label="S$k")
        end
        #---------------------------------------------------------------------------
        annotate!([(0.7, 0.1, "r(model_i, model_j) = $rxy")])
    end # let
end # function modelComparison2
# ╔═╡ a63ee0a9-b615-47a9-80a9-0eb46b5088f4
function dataModelComparison(title, x, y; xlabel="Voting Model", ylabel="Ratings Data")
    #= Scatter model predictions `x` against observed first-choice data `y`
       (one point per stimulus S1..S6), draw the identity line ("perfect
       agreement") and annotate the product-moment correlation r(x, y),
       truncated to three decimals.
       Fixes: stray comma before the keyword separator in the signature
       (`x, y,;`) and the mislabelled closing comment. =#
    let rxy = trunc(Statistics.cor(x, y), digits=3)
        diagonal = [0, 1]               # identity line endpoints
        #---------------------------------------------------------------------------
        plot(diagonal, diagonal, xlims=(-0.01, 1.0), ylims=(-0.01, 1.0),
            title=title, seriestype=:line, colour=:red,
            xlabel=xlabel, ylabel=ylabel, label="line of perfect agreement")
        #---------------------------------------------------------------------------
        stimColours = [:aquamarine, :red, :green, :violet, :orange, :blue]
        for (k, colour) in enumerate(stimColours)
            # one point per stimulus, same colour coding as the rating plots
            plot!((x[k], y[k]), seriestype=:scatter, colour=colour, label="S$k")
        end
        #---------------------------------------------------------------------------
        annotate!([(0.7, 0.1, "r(rating_i, model) = $rxy")])
    end # let
end # function dataModelComparison
# ╔═╡ ea394cd7-e6d0-4e55-8e19-4ff8150a3bfa
# data-vs-model fit: Thurstone predictions from sincerity/honesty ratings
dataModelComparison("1st Choice Data - Thurstone Mod.(Sincerity/Honesty)",
[.33, .20, .29, .10, .08, .00], # Thurstone Model (Ahrens&Möbus,1968,p.560,Tab. 4)
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ eacd0bc1-6579-4395-be97-9a4221c3ab22
# data-vs-model fit: Thurstone predictions from liberality ratings
dataModelComparison("1st Choice Data vs. Thurstone Model (Liberality)",
[.25, .23, .21, .19, .09, .03], # Thurstone Model (Ahrens&Möbus,1968,p.560,Tab. 4)
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 04ccd8a7-c97b-4760-bf2b-948dc20409f6
# data-vs-model fit: Thurstone predictions from objectivity ratings
dataModelComparison("1st Choice Data vs. Thurstone Model (Objectivity)",
[.29, .16, .22, .14, .14, .05], # Thurstone Model (Ahrens&Möbus,1968,p.560,Tab. 4)
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 8cedc30e-6061-4f0b-8fa4-5af825725ea8
# data-vs-model fit: Thurstone predictions from intelligence ratings
dataModelComparison("1st Choice Data vs. Thurstone Model (Intelligence)",
[.26, .04, .15, .22, .07, .26], # Thurstone Model (Ahrens&Möbus,1968,p.560,Tab. 4)
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ bc5ad309-5b2f-41a4-ad5f-04dd49b410a6
# data-vs-model fit: Thurstone predictions from civil-courage ratings
dataModelComparison("1st Choice Data vs. Thurstone Model (Civil Courage)",
[.11, .10, .06, .31, .17, .25], # Thurstone Model (Ahrens&Möbus,1968,p.560,Tab. 4)
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 70c13302-72e0-4dbb-85c0-3cee2a9f0272
# data-vs-model fit: voting-model predictions from sincerity/honesty ratings
dataModelComparison("1st Choice Data vs. Voting Model (Sincerity/Honesty)",
[.296296, .117284, .0617284, .0, .234568, .290123], # voting model predictions
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 77547f53-7ba4-4e3f-ae8e-6206cc83c747
# data-vs-model fit: voting-model predictions from liberality ratings
dataModelComparison("1st Choice Data vs. Voting Model (Liberality)",
# Voting Model on basis of liberality ratings
[.248705, .176166, .0984456, .0259067, .222798, .227979], # voting model preds.
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 980ca609-9885-44aa-99b8-035d697f2610
# data-vs-model fit: voting-model predictions from objectivity ratings
dataModelComparison("1st Choice Data vs. Voting Model (Objectivity Ratings)",
[0.239819, 0.171946, 0.108597, 0.0452489, 0.19457, 0.239819], # voting model
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ f917eefe-50cf-47f9-9c6b-a19f15ca17c0
# data-vs-model fit: voting-model predictions from intelligence ratings
dataModelComparison("1st Choice Data vs. Voting Model (Intelligence)",
# Voting Model on basis of intelligence ratings
[0.286957, 0.252174, 0.0434783, 0.252174, 0.0, 0.165217], # voting model pred.
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ 9441e656-5fb4-45f2-b4e9-394a08110d6a
# data-vs-model fit: voting-model predictions from civil-courage ratings
dataModelComparison("1st Choice Data vs. Voting Model (Civil Courage)",
# Voting Model predictions on basis of civil courage ratings
[.151899, .243671, .196203, .196203, .120253, .0917722],
[.33, .28, .24, .10, .05, .00]) # 1st choice data (Ahrens&Möbus,1968,p.560,Tab. 4)
# ╔═╡ ca2c9228-9367-4c62-9e77-8756675e94cf
# model-vs-model agreement on the basis of the civil-courage ratings
modelComparison2("Thurstone vs Voting Model (Civil Courage Rating)",
[0.151899, 0.243671, 0.196203, 0.196203, 0.120253,0.0917722], # voting model
[.11, .10, .06, .31, .17, .25]) # Thurstone Model(Ahrens&Möbus,1968,p.560, Tab.4)
# ╔═╡ fdab7ae8-3acc-4ce3-8506-e47a4137b94d
md"
---
###### Liberality Ratings
"
# ╔═╡ d56c1cde-d025-4d8b-be57-33651f8a58cd
begin # Liberality Ratings; Ahrens & Möbus, 1968, p.558, table 3
    # Each literal row below is the rating distribution of one stimulus;
    # permutedims yields the 7 (rating categories) x 6 (stimuli) layout
    # expected by the model and plotting cells.
    S2 = Array{Real, 2}(permutedims(
        [.00 .00 .05 .14 .33 .43 .05;   # stimulus S1
         .00 .14 .10 .29 .14 .24 .10;   # stimulus S2
         .00 .10 .24 .24 .24 .19 .00;   # stimulus S3
         .19 .29 .24 .10 .14 .05 .00;   # stimulus S4
         .00 .05 .05 .05 .43 .38 .05;   # stimulus S5
         .00 .00 .10 .29 .19 .38 .06])) # stimulus S6
end
# ╔═╡ 04aa6f0c-50bd-44b5-980e-a0562e27091b
# voting-model predictions from the liberality ratings S2
discreteVotingModel(S2, "Liberality Ratings")
# ╔═╡ ada73f85-c8f1-4baa-9898-0f89dd1c0795
# scatter/line plot of the six liberality rating distributions in S2
plotRatingDistributions2(S2, "Liberality Ratings")
# ╔═╡ 4a80b025-44b9-492b-9cd3-71550666c7b4
# NOTE(review): identical to the discreteVotingModel cell above — possibly
# intentional repetition in the notebook layout; confirm with the author
discreteVotingModel(S2, "Liberality Ratings")
# ╔═╡ 54c8c91c-2397-433a-9d3f-cd337154bd0b
# model-vs-model agreement on the basis of the liberality ratings
modelComparison2("Thurstone vs Voting Model (Liberality Ratings)",
[.248705, .176166, .0984456, .0259067, .222798, .227979], # voting model
[.25, .23, .21, .19, .09, .03]) # Thurstone Model(Ahrens&Möbus,1968,p.560, Tab.4)
# ╔═╡ d4d328c9-15bd-4538-a3ce-e16f951377a8
md"
---
###### Sincerity/Honesty Ratings
"
# ╔═╡ 31b9f7ad-95cb-404e-affb-de0cefea0ade
begin # Sincerity/Honesty Ratings; Ahrens & Möbus, 1968, p.558, table 3
    # Each literal row below is the rating distribution of one stimulus;
    # permutedims yields the 7 (rating categories) x 6 (stimuli) layout
    # expected by the model and plotting cells.
    S3 = Array{Real, 2}(permutedims(
        [.00 .00 .14 .14 .24 .29 .19;   # stimulus S1
         .00 .00 .19 .24 .38 .19 .00;   # stimulus S2
         .00 .10 .14 .29 .38 .05 .05;   # stimulus S3
         .38 .29 .19 .10 .05 .00 .00;   # stimulus S4
         .00 .05 .00 .24 .33 .33 .05;   # stimulus S5
         .00 .00 .05 .24 .24 .33 .14])) # stimulus S6
end
# ╔═╡ d1b49b43-f76a-492d-a90c-cc0f718be634
# voting-model predictions from the sincerity/honesty ratings S3
discreteVotingModel(S3, "Sincerity/Honesty Ratings")
# ╔═╡ 7af53601-021c-4560-8ebd-df89d8c17f47
# scatter/line plot of the six sincerity/honesty rating distributions in S3
plotRatingDistributions2(S3, "Sincerity/Honesty Ratings")
# ╔═╡ b8c1072f-c206-4c0b-a088-f10c1491f80a
# NOTE(review): identical to the discreteVotingModel cell above — possibly
# intentional repetition in the notebook layout; confirm with the author
discreteVotingModel(S3, "Sincerity/Honesty Ratings")
# ╔═╡ 80f8adc0-1e60-4008-9963-36d4ead0f562
# model-vs-model agreement on the basis of the sincerity/honesty ratings
# NOTE(review): "Sincerety" in the two strings below is a typo for
# "Sincerity" in a runtime title/comment — fix requires a behavior change
modelComparison2("Thurstone - Voting Model(Sincerety/Honesty Ratings)",
# Voting Model on basis of sincerety/honesty ratings
[0.296296, 0.117284, 0.0617284, 0.0, 0.234568, 0.290123], # voting model
[.33, .20, .29, .10, .08, .00]) # Thurstone Model(Ahrens&Möbus,1968,p.560, Tab.4)
# ╔═╡ b3af7545-e622-417b-b56b-6047d50127ae
md"
---
###### Intelligence Ratings
"
# ╔═╡ f8f2b229-efcd-4a0d-a62c-744e1cb8618a
begin # Intelligence Ratings; Ahrens & Möbus, 1968, p.558, table 3
    # Each literal row below is the rating distribution of one stimulus;
    # permutedims yields the 7 (rating categories) x 6 (stimuli) layout
    # expected by the model and plotting cells.
    S4 = Array{Real, 2}(permutedims(
        [.00 .00 .00 .05 .29 .33 .33;   # stimulus S1
         .00 .00 .10 .10 .24 .29 .29;   # stimulus S2
         .00 .00 .05 .14 .43 .33 .05;   # stimulus S3
         .00 .00 .00 .10 .19 .52 .29;   # stimulus S4
         .00 .00 .10 .14 .43 .33 .00;   # stimulus S5
         .00 .00 .00 .14 .43 .24 .19])) # stimulus S6
end
# ╔═╡ 50aef5d2-5b53-4abf-91e9-26b9a91021c9
# voting-model predictions from the intelligence ratings S4
discreteVotingModel(S4, "Voting Model: Intelligence Ratings")
# ╔═╡ 578a003d-8e39-4dd7-939d-0b4a95040a8a
# scatter/line plot of the six intelligence rating distributions in S4
plotRatingDistributions2(S4, "Intelligence Ratings")
# ╔═╡ 5943c7df-8512-428f-ac17-e16c5450fdd1
# NOTE(review): identical to the discreteVotingModel cell above — possibly
# intentional repetition in the notebook layout; confirm with the author
discreteVotingModel(S4, "Voting Model: Intelligence Ratings")
# ╔═╡ 2c0d5a84-6dfa-4855-ba41-764cb3d7b078
# model-vs-model agreement on the basis of the intelligence ratings
modelComparison2("Thurstone vs Voting Model (Intelligence Ratings)",
# Voting Model on basis of intelligence ratings
[0.286957, 0.252174, 0.0434783, 0.252174, 0.0, 0.165217], # voting model
[.26, .04, .15, .22, .07, .26]) # Thurstone Model(Ahrens&Möbus,1968,p.560, Tab.4)
# ╔═╡ 01780409-edab-4bcd-8569-e9ee23c9587e
md"
---
###### Objectivity Ratings
"
# ╔═╡ 8c661015-9dfc-4d82-a81c-4fb79cb206e5
begin # Objectivity
S5 = Array{Real, 2}(undef, (7, 6))
S5[:, 1] = [.00, .05, .10, .14, .19, .29, .24] # column 1
S5[:, 2] = [.00, .00, .19, .19, .24, .33, .05] # column 2
S5[:, 3] = [.00, .10, .29, .19, .19, .10, .14]
S5[:, 4] = [.00, .38, .05, .14, .33, .05, .05]
S5[:, 5] = [.00, .05, .10, .10, .33, .38, .05]
S5[:, 6] = [.00, .00, .14, .10, .24, .43, .10] # column 6
end
# ╔═╡ 16f1cd7b-c994-4271-b556-2bf520c925f3
# voting-model predictions from the objectivity ratings S5
discreteVotingModel(S5, "Objectivity Ratings")
# ╔═╡ 7de57a9f-fcab-45b4-aec3-7ac478271ba6
# fixed: this cell plotted S4 (the intelligence ratings) although the title
# says "Objectivity"; the objectivity rating matrix is S5
plotRatingDistributions2(S5, "Objectivity Ratings")
# ╔═╡ c2b421f4-80f6-4c88-bdae-e4cd97e9c2a5
# NOTE(review): identical to the discreteVotingModel cell above — possibly
# intentional repetition in the notebook layout; confirm with the author
discreteVotingModel(S5, "Objectivity Ratings")
# ╔═╡ 8c21b6ed-9f4c-4e08-9d88-2c621a68394a
# model-vs-model agreement on the basis of the objectivity ratings
modelComparison2("Thurstone vs Voting Model (Objectivity Ratings)",
[0.239819, 0.171946, 0.108597, 0.0452489, 0.19457, 0.239819], # voting model
# fixed: previously compared against the *civil-courage* Thurstone vector
# [.11, .10, .06, .31, .17, .25]; the objectivity Thurstone predictions
# (Ahrens & Möbus, 1968, p.560, Tab. 4) are used instead:
[.29, .16, .22, .14, .14, .05]) # Thurstone Model(Ahrens&Möbus,1968,p.560, Tab.4)
# ╔═╡ f997f20d-26ba-4ad4-98cd-d050a10c8e30
md"
Both models seem to make very different predictions when we look at correlations as a judgmental basis. The highest congruence between both models is on the basis of *Intelligence* ratings ($r=0.399$).
In summary, Thurstone's model seems to have a higher empirical validity than the voting model. The reason for this may be the fact that Thurstone's model exploits information not only from the extreme positive parts of the discriminal dispersions but from the *whole* distributions.
"
# ╔═╡ bbd5f8c0-cfb6-4417-a8d2-5474bf0b4cb1
md"
---
##### References
- **Ahrens, H.J. & Möbus, C.**; [*Zur Verwendung von Einstellungsmessungen bei der Prognose von Wahlentscheidungen*](http://oops.uni-oldenburg.de/2729/1/PCM1968.pdf); Zeitschrift für Experimentelle und Angewandte Psychologie, 1968, Band XV. Heft 4. S.543-563; last visit: 2023/09/08
- **Thurstone, L.L.**; [*The Prediction of Choice*](https://link.springer.com/content/pdf/10.1007/BF02288891.pdf); Psychometrika 10.4 (1945): 237-253; last visit 2023/09/08
"
# ╔═╡ 6f837e7f-3085-411b-b39f-c63f7fcf2939
md"
---
##### end of ch. 1.3.1.2
"
# ╔═╡ 517c3c8f-be36-49b2-98da-b9f2d67ddcf8
md"
====================================================================================
This is a **draft** under the Attribution-NonCommercial-ShareAlike 4.0 International **(CC BY-NC-SA 4.0)** license. Comments, suggestions for improvement and bug reports are welcome: **claus.moebus(@)uol.de**
====================================================================================
"
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[compat]
LaTeXStrings = "~1.3.0"
Plots = "~1.39.0"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.9.3"
manifest_format = "2.0"
project_hash = "45d9cde2c96eb17dd3a7c5e57287d25dd3077c27"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.BitFlags]]
git-tree-sha1 = "43b1a4a8f797c1cddadf60499a8a077d4af2cd2d"
uuid = "d1d4a3ce-64b1-5f1a-9ba4-7e7e69966f35"
version = "0.1.7"
[[deps.Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "19a35467a82e236ff51bc17a3a44b69ef35185a2"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.8+0"
[[deps.Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "CompilerSupportLibraries_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "4b859a208b2397a7a623a03449e4636bdb17bcf2"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.1+1"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "02aa26a4cf76381be7f66e020a3eddeb27b0a092"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.2"
[[deps.ColorSchemes]]
deps = ["ColorTypes", "ColorVectorSpace", "Colors", "FixedPointNumbers", "PrecompileTools", "Random"]
git-tree-sha1 = "d9a8f86737b665e15a9641ecbac64deef9ce6724"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.23.0"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "eb7f0f8307f71fac7c606984ea5fb2817275d6e4"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.4"
[[deps.ColorVectorSpace]]
deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "Requires", "Statistics", "TensorCore"]
git-tree-sha1 = "a1f44953f2382ebb937d60dafbe2deea4bd23249"
uuid = "c3611d14-8923-5661-9e6a-0046d554d3a4"
version = "0.10.0"
[deps.ColorVectorSpace.extensions]
SpecialFunctionsExt = "SpecialFunctions"
[deps.ColorVectorSpace.weakdeps]
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
[[deps.Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "fc08e5930ee9a4e03f84bfb5211cb54e7769758a"