-
Notifications
You must be signed in to change notification settings - Fork 0
/
04_bayesian_models.R
145 lines (120 loc) · 6.56 KB
/
04_bayesian_models.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
# Sum-code (+/-0.5) the experimental factors for the Bayesian models.
# A logical comparison coerces to 0/1; subtracting 0.5 yields the
# -0.5 / +0.5 contrast directly (NA propagates just as with ifelse()).
df_merged %<>% within(., {
  cGrammatical <- (grammatical == "grammatical") - 0.5
  cUngrammatical <- (grammatical == "ungrammatical") - 0.5
  cAttractorPlural <- (attractor_num == "plural") - 0.5
  # Experiments other than Experiment 1 used consonant-final nouns.
  cEndsInConsonant <- (experiment != "Experiment 1") - 0.5
  # Standardized frequency predictors (currently unused):
  #cFreqlog_n1n2 <- scale(freqlog_n1n2)
  #cFreqlog_n1 <- scale(freqlog_n1)
  #cFreqlog_n2 <- scale(freqlog_n2)
})
# Weakly informative priors shared by the response models below:
# heavy-tailed t on the intercept, standard-normal slopes, half-Cauchy
# group-level SDs, and an LKJ(2) prior favoring moderate correlations.
priors <- c(
  set_prior("student_t(3,0,2.5)", class = "Intercept"),
  set_prior("normal(0,1)", class = "b"),
  set_prior("cauchy(0,1)", class = "sd"),
  set_prior("lkj(2)", class = "cor")
)

# Keep experimental items only; rows with a missing source are retained
# (presumably experimental trials without a source label -- verify upstream).
df_merged_nofillers <- df_merged %>% subset(is.na(source) | source != "filler")

# RUN WITH PRIORS!
# Fitted brmsfit objects are collected here by name. Use list(), not c():
# c() would try to concatenate fit objects instead of storing them as
# elements (it only worked before because c() returns NULL).
models <- list()

# Shared sampler settings for all brm() calls below.
n_chains <- 4
n_cores <- 4
n_iter <- 2000
n_warmup <- 1000

# Cache paths for the full responses model (saved fit + generated Stan code).
fname_responses <- "./R/fits/responses_priors"
fname_responses_stan <- "./R/models/responses_priors.stan"
# Full acceptability model: Bernoulli probit regression of yes-responses on
# final-consonant x grammaticality x attractor number, with maximal random
# slopes for the within-unit factors by subject and by item.
# (1 + a * b | g) is the same random-effect structure as (a * b + 1 | g).
m_responses <- brm(
  ResponseYes ~ cEndsInConsonant * cGrammatical * cAttractorPlural +
    (1 + cGrammatical * cAttractorPlural | subject) +
    (1 + cGrammatical * cAttractorPlural | item),
  data = df_merged_nofillers,
  family = bernoulli("probit"),
  prior = priors,
  chains = n_chains,
  cores = n_cores,
  iter = n_iter,
  warmup = n_warmup,
  init_r = .1,
  # file caches the fit: if it exists, brm() reloads it instead of sampling.
  file = fname_responses,
  save_model = fname_responses_stan
)
models[['responses']] <- m_responses
# Cache paths for the ungrammatical-only model.
fname_ungram_responses <- "./R/fits/ungram_responses_priors"
fname_ungram_responses_stan <- "./R/models/ungram_responses_priors.stan"

# Attraction within ungrammatical sentences only: grammaticality is constant
# in this subset, so the model has final-consonant x attractor number with
# by-subject and by-item random slopes for attractor number.
m_ungram_responses <- brm(
  ResponseYes ~ cEndsInConsonant * cAttractorPlural +
    (1 + cAttractorPlural | subject) +
    (1 + cAttractorPlural | item),
  data = df_merged_nofillers %>% subset(grammatical != "grammatical"),
  family = bernoulli("probit"),
  prior = priors,
  chains = n_chains,
  cores = n_cores,
  iter = n_iter,
  warmup = n_warmup,
  init_r = .1,
  file = fname_ungram_responses,
  save_model = fname_ungram_responses_stan
)
models[['ungram_responses']] <- m_ungram_responses
# Collect fixed-effect summaries (posterior means, not robust medians) as
# data frames, one per model, keyed 'results_table_<model name>'.
# Start from an empty list: the loop assigns by NAME, so preallocating with
# vector('list', length(models)) would leave that many unnamed NULL slots
# alongside the named entries.
tables <- list()
for (i in seq_along(models)) {
  fname <- paste0('results_table_', names(models)[i])
  tables[[fname]] <- fixef(models[[i]], summary = TRUE, robust = FALSE) %>%
    as.data.frame() #%>% tibble::rownames_to_column("variables")
}
# Experiment 1 only: add log N1-N2 co-occurrence frequency as a continuous
# predictor. isTRUE() replaces `== T`: T is reassignable, and `exp1only == T`
# would error inside if() when the flag is NA; isTRUE() handles both safely.
if (isTRUE(exp1only)) {
  # NOTE: overwrites the global `priors` used by the earlier models.
  # Slope priors are set per coefficient here (same normal(0,1) for each)
  # because the model includes the unscaled freqlog_n1n2 predictor.
  priors <- c(set_prior("student_t(3,0,2.5)", class = "Intercept"),
              set_prior("normal(0,1)", class = "b", coef = "freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical"),
              set_prior("normal(0,1)", class = "b", coef = "cAttractorPlural"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cAttractorPlural:freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:cAttractorPlural"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:cAttractorPlural:freqlog_n1n2"),
              set_prior("cauchy(0,1)", class = "sd"),
              set_prior("lkj(2)", class = "cor"))
  fname_responses_exp1only <- "./R/fits/responses_exp1only_priors"
  fname_responses_exp1only_stan <- "./R/models/responses_exp1only_priors.stan"
  # Frequency slope varies by subject only; items each have a fixed frequency,
  # so the by-item structure keeps just the categorical slopes.
  m_responses_exp1only <-
    brm(ResponseYes ~ cGrammatical * cAttractorPlural * freqlog_n1n2 +
          (cGrammatical * cAttractorPlural * freqlog_n1n2 + 1 | subject) +
          (cGrammatical * cAttractorPlural + 1 | item),
        data = df_merged_nofillers %>% subset(experiment == "Experiment 1"),
        family = bernoulli("probit"),
        prior = priors,
        chains = n_chains, cores = n_cores, iter = n_iter, warmup = n_warmup, init_r = .1,
        file = fname_responses_exp1only, save_model = fname_responses_exp1only_stan
    )
  models[['responses_exp1']] <- m_responses_exp1only
}
# Lago-replication experiments only (everything except Experiment 1), with
# log N1-N2 frequency as a continuous predictor. isTRUE() replaces `== T`:
# T is reassignable, and the comparison would error in if() on an NA flag.
if (isTRUE(lagoonly)) {
  # NOTE: overwrites the global `priors` used by the earlier models.
  priors <- c(set_prior("student_t(3,0,2.5)", class = "Intercept"),
              set_prior("normal(0,1)", class = "b", coef = "freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical"),
              set_prior("normal(0,1)", class = "b", coef = "cAttractorPlural"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cAttractorPlural:freqlog_n1n2"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:cAttractorPlural"),
              set_prior("normal(0,1)", class = "b", coef = "cGrammatical:cAttractorPlural:freqlog_n1n2"),
              set_prior("cauchy(0,1)", class = "sd"),
              set_prior("lkj(2)", class = "cor"))
  fname_responses_Lagoonly <- "./R/fits/responses_Lagoonly_priors"
  fname_responses_Lagoonly_stan <- "./R/models/responses_Lagoonly_priors.stan"
  # By-subject frequency slope only; items have a fixed frequency value.
  m_responses_Lagoonly <-
    brm(ResponseYes ~ cGrammatical * cAttractorPlural * freqlog_n1n2 +
          (cGrammatical * cAttractorPlural * freqlog_n1n2 + 1 | subject) +
          (cGrammatical * cAttractorPlural + 1 | item),
        data = df_merged_nofillers %>% subset(experiment != "Experiment 1"),
        family = bernoulli("probit"),
        prior = priors,
        chains = n_chains, cores = n_cores, iter = n_iter, warmup = n_warmup, init_r = .1,
        file = fname_responses_Lagoonly, save_model = fname_responses_Lagoonly_stan)
  models[['responses_lago']] <- m_responses_Lagoonly
}
# Reaction-time model: lognormal regression of RT on the full factorial of
# final-consonant x grammaticality x attractor number x frequency.
# isTRUE() replaces `== T` (reassignable T; errors in if() on NA flag).
if (isTRUE(rt)) {
  # NOTE: overwrites the global `priors` used by the earlier models.
  priors <- c(set_prior("student_t(3,0,2.5)", class = "Intercept"),
              set_prior("normal(0,1)", class = "b"),
              set_prior("cauchy(0,1)", class = "sd"),
              set_prior("lkj(2)", class = "cor"))
  fname_rt <- "./R/fits/rt_priors"
  fname_rt_stan <- "./R/models/rt_priors.stan"
  # Unlike the response models, no init_r is set here -- presumably the
  # lognormal model initializes without it; confirm if sampling diverges.
  m_rts <- brm(RT ~ cEndsInConsonant * cGrammatical * cAttractorPlural * freqlog_n1n2 +
                 (cGrammatical * cAttractorPlural * freqlog_n1n2 + 1 | subject) +
                 (cGrammatical * cAttractorPlural + 1 | item),
               data = df_merged_nofillers,
               family = lognormal(),
               prior = priors,
               chains = n_chains, cores = n_cores, iter = n_iter, warmup = n_warmup,
               file = fname_rt, save_model = fname_rt_stan)
  models[['rt']] <- m_rts
}