# Week 1: Scale & norm scores ----
# Create an object called lokfa_EN containing the scale items.
# To select items by column position: lokfa_EN <- Topic_1_lok[, 15:25]
# Or by specific names:
# lokfa_EN <- Topic_1_lok[, c("lokfa1", "lokfa2", "lokfa3", "lokfa4", "lokfa5", "lokfa6")] # etc.

# Obtain the scale scores (here we are interested in the mean of the items).
scale_scores <- rowMeans(lokfa_EN)
# rowSums() can be used when the sum matters more, but it is not optimal
# when respondents answered different numbers of items.
scale_scores <- rowSums(lokfa_EN)
# Compute z-scores (mean = 0, sd = 1).
z_score <- scale(scale_scores) # fixed: the object is called scale_scores
# Compute T-scores (mean = 50, sd = 10).
t_score <- 10 * z_score + 50
# Compute percentiles (can add an extra rounding step with round()).
rank <- rank(scale_scores)
percentile <- rank / length(scale_scores) * 100
# Or in one (rounded) step:
percentile <- round(100 * rank(scale_scores) / length(scale_scores), 0)
# Create a data frame with the scale & norm scores.
# Fixed: stray "-" removed (it would negate every value), and the column is
# named scale_score so later code that references $scale_score works.
table_lokfa_EN <- data.frame(scale_score = scale_scores, z_score, t_score, percentile)
# View the first rows of the norm table.
head(table_lokfa_EN)
# Compute descriptives for the scale & norm scores.
summary(table_lokfa_EN) # do the means look right?
sd(scale_scores) # all in order? (fixed: underscore, not a space)
# Create a norm table: each unique scale score with its corresponding norm scores.
norm_table_lokfa_EN <- unique(table_lokfa_EN[order(table_lokfa_EN$scale_score),
                                             c("scale_score", "z_score", "t_score", "percentile")])
# Make the histograms in a 2 x 2 grid (on top of / beside each other).
par(mfrow = c(2, 2))
# Fixed: the table's columns are singular (scale_score, not scale_scores);
# the plural names do not exist and hist() would fail on NULL.
hist(norm_table_lokfa_EN$scale_score)
hist(norm_table_lokfa_EN$z_score)
hist(norm_table_lokfa_EN$t_score)
hist(norm_table_lokfa_EN$percentile)
# Week 2: Reliability of test scores ----
# Calculate the item means & sort them in ascending order.
colMeans(lokfa_EN)
sort(colMeans(lokfa_EN)) # sorted option (fixed: missing closing parenthesis)
sapply(lokfa_EN, mean)   # same result as colMeans() (fixed typo: sapply)
# Standard deviation of the items.
sapply(lokfa_EN, sd)     # shortcut across all items
sd(lokfa_EN$lokfa1)      # longer: look up the sd item by item (fixed: stray second argument removed)
# Using the means, make pairs of items that are alike,
# then split them into two groups of similar items.
# Balance the split by alternating which half gets the lower-mean item of each pair.
# New data frame with columns reordered according to the means-based split.
halves <- lokfa_EN[, c("lokfa11", "lokfa7", "lokfa3", "lokfa10", "lokfa9", "lokfa4",
                       "lokfa1", "lokfa2", "lokfa6", "lokfa5")]
# Check if the split is correct.
colnames(halves)
# Calculate the split-half reliability; we are interested in the Spearman-Brown value.
splithalves <- items_split_half(halves)
# Calculate Cronbach's alpha (psych package).
alpha(lokfa_EN)
# Extract the raw alpha coefficient for later use.
rxx <- alpha(lokfa_EN)$total$raw_alpha
# Which item contributes most to the reliability? Check the alpha-if-item-deleted table.
alpha(lokfa_EN)$alpha.drop # fixed: closing parenthesis, stray underscore removed
# Delete the item that impairs reliability & re-estimate alpha.
# Identify which column holds item "lokfa10".
colnames(lokfa_EN) # it's the 9th column in this ordering
# Run alpha without lokfa10 — dropped by name, which is safer than the
# hard-coded index [ , -9] if the column order ever changes.
alpha(lokfa_EN[, colnames(lokfa_EN) != "lokfa10"])
# To obtain only the Cronbach's alpha value:
alpha(lokfa_EN[, colnames(lokfa_EN) != "lokfa10"])$total$raw_alpha
# Using the Spearman-Brown formula, report the revised Cronbach's alpha
# reliability if we add 3 more items to the original scale.
rxx <- alpha(lokfa_EN)$total$raw_alpha # or assign the alpha value directly
# Fixed: the scale object is lokfa_EN (lokfa_DS was never created);
# ncol() makes "number of items" explicit.
(k_original <- ncol(lokfa_EN))         # number of items in the original scale
(k_revised <- k_original + 3)          # number of items after lengthening
(n <- k_revised / k_original)          # lengthening factor
(n * rxx / (1 + (n - 1) * rxx))        # Spearman-Brown prophecy formula
# Week 3: Validity & Classification table ----
# Identify the 3 scales in the multitrait-multimethod data.
head(mtmm)
learning <- mtmm[, 1:12]
iq <- mtmm[, 13:24]
learpersonality <- mtmm[, 25:36] # learning-personality items
# Compute the scale scores for the 3 scales
# (reusing the subsets just created instead of re-indexing mtmm).
learning_scale <- rowSums(learning)
iq_scale <- rowSums(iq)
learning_personality_scale <- rowSums(learpersonality)
# Calculate the internal reliability (Cronbach's alpha) for each scale.
learning_alpha <- alpha(learning)$total$raw_alpha