% \VignetteIndexEntry{How to find Omega} % \VignettePackage{psych} % \VignetteKeywords{multivariate} % \VignetteKeyword{models} % \VignetteKeyword{Hplot} %\VignetteDepends{psych} %\documentclass[doc]{apa} \documentclass[11pt]{article} %\documentclass[11pt]{amsart} \usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots. \geometry{letterpaper} % ... or a4paper or a5paper or ... %\geometry{landscape} % Activate for for rotated page geometry \usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent \usepackage{graphicx} \usepackage{amssymb} \usepackage{epstopdf} \usepackage{mathptmx} \usepackage{helvet} \usepackage{courier} \usepackage{epstopdf} \usepackage{makeidx} % allows index generation \usepackage[authoryear,round]{natbib} \usepackage{gensymb} \usepackage{longtable} %\usepackage{geometry} \usepackage{amssymb} \usepackage{amsmath} %\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png} \usepackage{Sweave} %\usepackage{/Volumes/'Macintosh HD'/Library/Frameworks/R.framework/Versions/2.13/Resources/share/texmf/tex/latex/Sweave} %\usepackage[ae]{Rd} %\usepackage[usenames]{color} %\usepackage{setspace} \bibstyle{apacite} \bibliographystyle{apa} %this one plus author year seems to work? %\usepackage{hyperref} \usepackage[colorlinks=true,citecolor=blue]{hyperref} %this makes reference links hyperlinks in pdf! \DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png} \usepackage{multicol} % used for the two-column index \usepackage[bottom]{footmisc}% places footnotes at page bottom \let\proglang=\textsf \newcommand{\R}{\proglang{R}} %\newcommand{\pkg}[1]{{\normalfont\fontseries{b}\selectfont #1}} \newcommand{\Rfunction}[1]{{\texttt{#1}}} \newcommand{\fun}[1]{{\texttt{#1}\index{#1}\index{R function!#1}}} \newcommand{\pfun}[1]{{\texttt{#1}\index{#1}\index{R function!#1}\index{R function!psych package!#1}}}\newcommand{\Rc}[1]{{\texttt{#1}}} %R command same as Robject \newcommand{\Robject}[1]{{\texttt{#1}}} \newcommand{\Rpkg}[1]{{\textit{#1}\index{#1}\index{R package!#1}}} %different from pkg - which is better? \newcommand{\iemph}[1]{{\emph{#1}\index{#1}}} \newcommand{\wrc}[1]{\marginpar{\textcolor{blue}{#1}}} %bill's comments \newcommand{\wra}[1]{\textcolor{blue}{#1}} %bill's comments \newcommand{\ve}[1]{{\textbf{#1}}} %trying to get a vector command \usepackage{fancyvrb} %this allows fancy boxes \fvset{fontfamily=courier} \DefineVerbatimEnvironment{Routput}{Verbatim} %{fontsize=\scriptsize, xleftmargin=0.6cm} {fontseries=b,fontsize=\scriptsize, xleftmargin=0.1cm} \DefineVerbatimEnvironment{Binput}{Verbatim} {fontseries=b, fontsize=\scriptsize,frame=single, label=\fbox{lavaan model syntax}, framesep=2mm} %\DefineShortVerb{\!} %%% generates error! 
%change the definition of Sinput from Sweave
\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontseries=b, fontsize=\scriptsize, frame=single, label=\fbox{R code},xleftmargin=0pt, framesep=1mm}
\DefineVerbatimEnvironment{Rinput}{Verbatim} %{fontsize=\scriptsize, frame=single, label=\fbox{R code}, framesep=1mm}
{fontseries=b, fontsize=\scriptsize, frame=single, label=\fbox{R code},xleftmargin=0pt, framesep=1mm}
\DefineVerbatimEnvironment{Link}{Verbatim} {fontseries=b, fontsize=\small, formatcom=\color{darkgreen}, xleftmargin=1.0cm}
\DefineVerbatimEnvironment{Toutput}{Verbatim} {fontseries=b,fontsize=\tiny, xleftmargin=0.1cm}
\DefineVerbatimEnvironment{rinput}{Verbatim} {fontseries=b, fontsize=\tiny, frame=single, label=\fbox{R code}, framesep=1mm}
\newcommand{\citeti}[1]{\begin{tiny}\citep{#1}\end{tiny}}
\newcommand{\light}[1]{\textcolor{gray}{#1}}
\newcommand{\vect}[1]{\boldsymbol{#1}}
\let\vec\vect
\makeindex % used for the subject index

\title{Using \R{} and the \Rpkg{psych} package to find $\omega$}
\author{William Revelle\\Department of Psychology\\Northwestern University}
%\affiliation{Northwestern University}
%\acknowledgements{Written to accompany the psych package. Comments should be directed to William Revelle \\ \url{revelle@northwestern.edu}}
%\date{} % Activate to display a given date or no date

\begin{document}
\maketitle
\tableofcontents
\newpage

\section{Overview of this and related documents}

To do basic and advanced personality and psychological research using \R{} is not as complicated as some think. This is one of a set of ``How To'' guides for doing various things using \R{} \citep{R}, particularly using the \Rpkg{psych} \citep{psych} package.

The current list of How To's includes:
\begin{enumerate}
\item An \href{http://personality-project.org/r/psych/intro.pdf}{introduction} (vignette) of the \Rpkg{psych} package
\item An \href{http://personality-project.org/r/psych/overview.pdf}{overview} (vignette) of the \Rpkg{psych} package
\item \href{http://personality-project.org/r/psych/HowTo/getting_started.pdf}{Installing} \R{} and some useful packages
\item Using \R{} and the \Rpkg{psych} package to find \href{http://personality-project.org/r/psych/HowTo/omega.pdf}{$\omega_h$} and $\omega_t$ (this document).
\item Using \R{} and the \Rpkg{psych} package for \href{http://personality-project.org/r/psych/HowTo/factor.pdf}{factor analysis} and principal components analysis.
\item Using the \pfun{scoreItems} function to find \href{http://personality-project.org/r/psych/HowTo/scoring.pdf}{scale scores and scale statistics}.
\item Using \pfun{mediate} and \pfun{setCor} to do \href{http://personality-project.org/r/psych/HowTo/mediation.pdf}{mediation, moderation and regression analysis}
\end{enumerate}

\subsection{$\omega_h$ as an estimate of the general factor saturation of a test}

Cronbach's coefficient $\alpha$ \citep{cronbach:51} is perhaps the most used (and most misused) estimate of the internal consistency of a test. $\alpha$ may be found in the \Rpkg{psych} package using the \pfun{alpha} function. However, two alternative estimates of reliability that take into account the hierarchical structure of the inventory are McDonald's $\omega_h$ and $\omega_t$ \citep{mcdonald:tt}. These may be found in \R{} in one step using one of two functions in the \Rpkg{psych} package: the \pfun{omega} function for an exploratory analysis (see Figure~\ref{fig:omega.9}) or \pfun{omegaSem} for a confirmatory analysis using the \Rpkg{lavaan} package, based upon the exploratory solution from \pfun{omega}.
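For example, given a data frame of item responses called \Robject{my.data} (the name used for your own data throughout this guide), both analyses are one-liners (a minimal sketch):
~\
\begin{Rinput}
om.efa <- omega(my.data)     #exploratory analysis: reports omega_h, alpha, and omega_t
om.cfa <- omegaSem(my.data)  #confirmatory analysis: requires the lavaan package
\end{Rinput}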
This guide explains how to do it for the new or novice \R{} user. This set of instructions is adapted from three different sets of notes that the interested reader might find helpful: a set of slides developed for a \href{http://personality-project.org/r/aps/aps-short.pdf}{two hour short course} in \R{} given for several years to the Association for Psychological Science, as well as a \href{http://personality-project.org/r/}{short guide} to \R{} for psychologists and the \href{http://cran.r-project.org/web/packages/psych/vignettes/overview.pdf}{vignette} for the \Rpkg{psych} package.

McDonald has proposed coefficient omega (hierarchical) ($\omega_h$) as an estimate of the general factor saturation of a test. \cite{zinbarg:pm:05} and \cite{rz:09} compare McDonald's $\omega_h$ to Cronbach's $\alpha$ and Revelle's $\beta$. They conclude that $\omega_h$ is the best estimate. (See also \cite{zinbarg:apm:06} and \cite{rz:09}, as well as \cite{rc:reliability,rc:pa:19}.)

By following these simple guides, you will soon be able to do such things as find $\omega_{h}$ by issuing just three lines of code:
\begin{Rinput}
library(psych)
my.data <- read.file()
omega(my.data)
\end{Rinput}
The resulting output will be both graphical and textual. This guide helps the naive \R{} user to issue those three lines. Be careful, for once you start using \R{}, you will want to do more.

One way to find $\omega_h$ is to do a factor analysis of the original data set, rotate the factors obliquely, factor that correlation matrix, do a Schmid-Leiman (\pfun{schmid}) transformation to find the general factor loadings, and then find $\omega_h$. All of these steps are carried out at once by the \pfun{omega} function in the \Rpkg{psych} package. This requires installing and using both \R{} and the \Rpkg{psych} package \citep{psych}.

\subsubsection{But what about $\alpha$?}

Several statistics were developed in the 1930s-1950s as shortcut estimates of reliability \citep{rc:pa:19}. The approaches that consider just one test are collectively known as internal consistency procedures, but they also borrow from the concepts of domain sampling. Some of these techniques, e.g., \cite{cronbach:51,guttman:45,kuder:37}, were developed before advances in computational speed made it trivial to find the factor structure of tests, and were based upon test and item variances. These procedures ($\alpha$, $\lambda_3$, KR20) were essentially shortcuts for estimating reliability. To just find Guttman's $\lambda_3$ \citep{guttman:45}, which is also known as \emph{coefficient} $\alpha$ \citep{cronbach:51}, you can use the \pfun{alpha} function or the \pfun{scoreItems} function. See the tutorial on how to use the \pfun{scoreItems} function to find \href{http://personality-project.org/r/psych/HowTo/scoring.pdf}{scale scores and scale statistics}. But, with modern computers, we can find \emph{model based} estimates that consider the factor structure of the items. $\omega_h$ and $\omega_t$ are two such model based estimates and are easy to find in \R{}.

~\
<<echo=TRUE>>=
library(psych)         #make the psych package active
library(psychTools)    #make psychTools active
om <- omega(Thurstone) #do the analysis
om                     #show it
@

<<echo=FALSE>>=
png('Thurstone.png')
omega.diagram(om)
dev.off()
@

\begin{figure}[htbp]
\begin{center}
\includegraphics{Thurstone.png}
\caption{$\omega_h$ is a reliability estimate of the general factor of a set of variables. It is based upon the correlation of lower order factors. It may be found in \R{} by using the \pfun{omega} function which is part of the \Rpkg{psych} package.
The figure shows a solution for the \pfun{Thurstone} 9 variable data set. Compare this to the solution using the \pfun{omegaDirect} function from \cite{waller:17} (Figure~\ref{fig:direct}).}
\label{fig:omega.9}
\end{center}
\end{figure}

\newpage

To use \R{} obviously requires installing \R{} on your computer. This is very easy to do (see section~\ref{install}) and needs to be done only once. (The following sections are elaborated in the \href{https://personality-project.org/r/psych/HowTo/getting_started.pdf}{``getting started'' How To}. If you need more help in installing \R{}, see that longer version.)

The power of \R{} is in the supplemental \emph{packages}. There are at least 16,000 packages that have been contributed to the \R{} project. To do any of the analyses discussed in these ``How To's'', you will need to install the \Rpkg{psych} package \citep{psych}. To do factor analyses or principal component analyses you will also need the \Rpkg{GPArotation} \citep{GPA} package. With these two packages, you will be able to find $\omega_{h}$ using Exploratory Factor Analysis. If you want to estimate $\omega_h$ using Confirmatory Factor Analysis, you will also need to add the \Rpkg{lavaan} \citep{lavaan} package. To use \Rpkg{psych} to create simulated data sets, you also need the \Rpkg{mnormt} \citep{mnormt} package. For a more complete installation of a number of psychometric packages, you can install and activate a package (\Rpkg{ctv}) that installs a large set of psychometrically relevant packages. As is true for \R{}, you will need to install packages just once.

\subsection{Install R for the first time}
\begin{enumerate}
\item Download \R{} from CRAN (\url{http://cran.r-project.org/})
\item Install \R{} (current version is 4.0.2)
\item Start \R{}. Note that the \R{} prompt $>$ starts off every line. This is \R{}'s way of indicating that it wants input. In addition, note that almost all commands start and finish with parentheses.
\item Add useful packages (you need to do this just once; see section~\ref{installing})
\begin{enumerate}
\item for the basic installation:
\begin{Rinput}
install.packages("psych", dependencies=TRUE)  #the minimum requirement, or
install.packages(c("psych","GPArotation"), dependencies=TRUE) #required for factor analysis
\end{Rinput}
\item or if you want to do CFA
\begin{Rinput}
install.packages(c("psych","lavaan"), dependencies=TRUE)
\end{Rinput}
\item or if you want to install the psychometric task views
\begin{Rinput}
install.packages("ctv")        #this downloads the task view package
library(ctv)                   #this activates the ctv package
install.views("Psychometrics") #among others
\end{Rinput}
\end{enumerate}
\item Take a 5 minute break while the packages are downloaded and installed.
\item Activate the package(s) you want to use (e.g., \Rpkg{psych})
\begin{Rinput}
library(psych)  #Only need to make psych active once a session
\end{Rinput}
\Rpkg{psych} will automatically activate the other packages it needs, as long as they are installed.
Note that \Rpkg{psych} is updated roughly quarterly; the current version is 2.0.8. Patches and improvements to \Rpkg{psych} (the bleeding edge version) are available from the repository at the personality-project web server and may be installed from there:
~\
\begin{Rinput}
install.packages("psych", repos = "https://personality-project.org/r", type = "source")
\end{Rinput}
%\item library(sem) \#will be used for a few examples
\item Use \R{}
\end{enumerate}

\subsubsection{Install R}
\label{install}

Go to the \href{http://cran.r-project.org}{Comprehensive R Archive Network (CRAN)} at \url{http://cran.r-project.org}. Choose your operating system and then download and install the appropriate version -- Mac, PC or Unix/Linux.

Once you have installed \R{}, you will probably (particularly if you have a PC) want to download and install the \href{https://www.rstudio.com}{RStudio} program. It is a very nice interface for PCs and Macs that combines four windows into one screen.

\subsubsection{Install relevant packages}
\label{installing}

Once \R{} is installed on your machine, you still need to install a few relevant ``packages''. Packages are what make \R{} so powerful, for they are special sets of functions that are designed for one particular application. In the case of the \Rpkg{psych} package, this is an application for doing the kind of basic data analysis and psychometric analysis that psychologists and many others find particularly useful. \Rpkg{psych} may be thought of as a ``Swiss Army Knife'' for psychological statistics. While not the best tool for a particular job, it is a useful tool for many jobs.

You may either install the minimum set of packages necessary to do the analysis using an Exploratory Factor Analysis (EFA) approach (recommended) or a few more packages to do both an EFA and a CFA approach. It is also possible to add many psychometrically relevant packages all at once by using the ``task views'' approach. A particularly powerful package is the \Rpkg{lavaan} \citep{lavaan} package for doing structural equation modeling. Another useful one is the \Rpkg{sem} package \citep{sem}.

\paragraph{Install the minimum set} This may be done by typing into the console or using menu options (e.g., the Package Installer underneath the Packages and Data menu).
\begin{Rinput}
install.packages(c("psych", "psychTools"), dependencies = TRUE)
\end{Rinput}

\paragraph{Install a few more packages} If you want some more functionality for some of the more advanced statistical procedures (e.g., \pfun{omegaSem}) you will need to install a few more packages (e.g., \Rpkg{lavaan}).
\begin{Rinput}
install.packages(c("psych","GPArotation","lavaan"), dependencies=TRUE)
\end{Rinput}

\paragraph{Install a ``task view'' to get lots of packages} If you know that there are a number of packages that you want to use, it is possible that they are listed as a ``task view''. For instance, about 50 packages will be installed at once if you install the ``Psychometrics'' task view. You can do so by first installing a package (\Rpkg{ctv}) that in turn can install many different task views. To see the list of possible task views, go to \url{https://cran.r-project.org/web/views/}.
~\
\begin{Rinput}
install.packages("ctv")        #this downloads the task view package
library(ctv)                   #this activates the ctv package
install.views("Psychometrics") #one of the many task views
\end{Rinput}
Take a 5 minute break because you will be installing about 50 packages.

\paragraph{For the more adventurous users} The \Rpkg{psych} package is under (sporadic) development with a new release issued to CRAN roughly every 4-6 months.
The experimental, development version (prerelease) is always available at the Personality-Project web site and may be installed for Macs or PCs directly:
~\
\begin{Rinput}
install.packages("psych", repos = "https://personality-project.org/r", type = "source")
\end{Rinput}
This development version will have fixed any bugs reported since the last release and will have various new features that are being tested before release to CRAN. After installation, it is necessary to restart \R{} to make the new version active.

\paragraph{Make the \Rpkg{psych} package active.} You are almost ready. But first, to use most of the following examples you need to make the \Rpkg{psych} and \Rpkg{psychTools} packages active. You only need to do this once per session.
~\
\begin{Rinput}
library(psych)       #to do the analyses described here
library(psychTools)  #for some useful additions such as read.file
\end{Rinput}
%(If you want to automate this last step, you can create a special command to be run every time you start \R{}.
%
%\begin{Rinput}
%.First <- function() {library(psych)}
%\end{Rinput}
%Do this when you first start \R. Then quit with the save option. Then restart \R. You will now automatically have loaded the \Rpkg{psych} package every time you start \R{}.)

\section{Reading in the data for analysis}

\subsection{Find a file and read from it}

There are of course many ways to enter data into \R{}. Reading from a local file using \pfun{read.file} is perhaps the most convenient. It will read in most of the standard file types (.csv, .sav, .txt, etc.). \pfun{read.file} combines the \fun{file.choose} and \fun{read.table} functions:
~\
\begin{Rinput}
my.data <- read.file()  #note the open and closing parentheses
\end{Rinput}
\pfun{read.file} opens a search window on your system just like any open file command does. \pfun{read.file} assumes that the first row of your table has labels for each column. If this is not true, specify names=FALSE, e.g.,
~\
\begin{Rinput}
my.data <- read.file(names = FALSE)
\end{Rinput}
If you want to read a remote file, specify the file name and then call \pfun{read.file}:
~\
\begin{Rinput}
datafilename <- "http://personality-project.org/r/datasets/finkel.sav"
new.data <- read.file(datafilename)  #the data have labels
\end{Rinput}

\subsection{Or: copy the data from another program using the copy and paste commands of your operating system}

However, many users will enter their data in a text editor or spreadsheet program and then want to copy and paste into \R{}. This may be done by using one of the \pfun{read.clipboard} set of functions.
\begin{description}
\item [\pfun{read.clipboard}] is the base function for reading data from the clipboard.
\item [\pfun{read.clipboard.csv}] for reading text that is comma delimited.
\item [\pfun{read.clipboard.tab}] for reading text that is tab delimited (e.g., copied directly from an Excel file).
\item [\pfun{read.clipboard.lower}] for reading input of a lower triangular matrix with or without a diagonal. The resulting object is a square matrix.
\item [\pfun{read.clipboard.upper}] for reading input of an upper triangular matrix.
\item [\pfun{read.clipboard.fwf}] for reading in fixed width fields (some very old data sets).
\end{description}
For example, given a data set copied to the clipboard from a spreadsheet, just enter the command
~\
\begin{Rinput}
my.data <- read.clipboard()
\end{Rinput}
This will work if every data field has a value, even for missing data (e.g., coded as NA or -999).
If the data were entered in a spreadsheet and the missing values were just empty cells, then the data should be read in as tab delimited or by using the \pfun{read.clipboard.tab} function.
~\
\begin{Rinput}
my.data <- read.clipboard(sep="\t")  #define the tab option, or
my.tab.data <- read.clipboard.tab()  #just use the alternative function
\end{Rinput}
For the case of data in fixed width fields (some old data sets tend to have this format), copy to the clipboard and then specify the width of each field (in the example below, the first variable is 5 columns, the second is 2 columns, the next 5 are 1 column each, and the last 4 are 3 columns each).
~\
\begin{Rinput}
my.data <- read.clipboard.fwf(widths=c(5,2,rep(1,5),rep(3,4)))
\end{Rinput}

\subsection{Or: import from an SPSS or SAS file}

To read data from an SPSS, SAS, or Systat file, you can probably just use the \pfun{read.file} function. \pfun{read.file} examines the suffix of the data file and, if it is .sav (from SPSS) or .xpt (from SAS), will attempt to read the file using various default options. However, if that does not work, use the \Rpkg{foreign} package. This comes with base \R{} but still needs to be loaded using the \Rfunction{library} command.

\fun{read.spss} reads a file stored by the SPSS save or export commands.
\begin{verbatim}
read.spss(file, use.value.labels = TRUE, to.data.frame = FALSE,
          max.value.labels = Inf, trim.factor.names = FALSE,
          trim_values = TRUE, reencode = NA, use.missings = to.data.frame)
\end{verbatim}
The \Rfunction{read.spss} function has many parameters that need to be set. In the example, I have shown the parameters that I think are most useful.
\begin{description}
\item [file] Character string: the name of the file or URL to read.
\item [use.value.labels] Convert variables with value labels into \R{} factors with those levels?
\item [to.data.frame] Return a data frame? Defaults to FALSE, but probably should be TRUE in most cases.
\item [max.value.labels] Only variables with value labels and at most this many unique values will be converted to factors if use.value.labels $= TRUE$.
\item [trim.factor.names] Logical: trim trailing spaces from factor levels?
\item [trim\_values] Logical: should values and value labels have trailing spaces ignored when matching for use.value.labels $= TRUE$?
\item [use.missings] Logical: should information on user-defined missing values be used to set the corresponding values to NA?
\end{description}
The following is an example of reading from a remote SPSS file and then describing the data set to make sure that it looks ok (with thanks to Eli Finkel).
~\
\begin{Rinput}
datafilename <- "http://personality-project.org/r/datasets/finkel.sav"
eli <- read.file(datafilename)
describe(eli, skew=FALSE)
\end{Rinput}
\begin{Routput}
         var  n  mean    sd median trimmed   mad min max range   se
USER*      1 69 35.00 20.06     35   35.00 25.20   1  69    68 2.42
HAPPY      2 69  5.71  1.04      6    5.82  0.00   2   7     5 0.13
SOULMATE   3 69  5.09  1.80      5    5.32  1.48   1   7     6 0.22
ENJOYDEX   4 68  6.47  1.01      7    6.70  0.00   2   7     5 0.12
UPSET      5 69  0.41  0.49      0    0.39  0.00   0   1     1 0.06
\end{Routput}

\section{Some simple descriptive statistics before you start}

Although you probably want to jump right in and find $\omega$, you should first make sure that your data are reasonable. Use the \pfun{describe} function to get some basic descriptive statistics. This next example takes advantage of a built-in data set.
~\
\begin{Sinput}
my.data <- sat.act  #built in example -- replace with your data
describe(my.data)
\end{Sinput}
\begin{Soutput}
          var   n   mean     sd median trimmed    mad min max range  skew kurtosis   se
gender      1 700   1.65   0.48      2    1.68   0.00   1   2     1 -0.61    -1.62 0.02
education   2 700   3.16   1.43      3    3.31   1.48   0   5     5 -0.68    -0.07 0.05
age         3 700  25.59   9.50     22   23.86   5.93  13  65    52  1.64     2.42 0.36
ACT         4 700  28.55   4.82     29   28.84   4.45   3  36    33 -0.66     0.53 0.18
SATV        5 700 612.23 112.90    620  619.45 118.61 200 800   600 -0.64     0.33 4.27
SATQ        6 687 610.22 115.64    620  617.25 118.61 200 800   600 -0.59    -0.02 4.41
\end{Soutput}

There are, of course, all kinds of things you could do with your data at this point, but read about them in the \href{http://cran.r-project.org/web/packages/psych/vignettes/intro.pdf}{introductory vignette} and \href{http://cran.r-project.org/web/packages/psychTools/vignettes/overview.pdf}{more advanced vignette} for the \Rpkg{psych} package.

\section{Using the \pfun{omega} function to find $\omega$}

Two alternative estimates of reliability that take into account the hierarchical structure of the inventory are McDonald's $\omega_h$ and $\omega_t$ \citep{mcdonald:tt,rz:09}. These may be found using the \pfun{omega} function for an exploratory analysis (see Figure~\ref{fig:omega.9}) or \pfun{omegaSem} for a confirmatory analysis (using the \Rpkg{lavaan} package) based upon the exploratory solution from \pfun{omega}.

\subsection{Background on the $\omega$ statistics}

\cite{mcdonald:tt} has proposed coefficient omega (hierarchical) ($\omega_h$) as an estimate of the general factor saturation of a test. \href{http://personality-project.org/revelle/publications/zinbarg.revelle.pmet.05.pdf}{\cite{zinbarg:pm:05}} compare McDonald's $\omega_h$ to Cronbach's $\alpha$ and Revelle's $\beta$. They conclude that $\omega_h$ is the best estimate. (See also \cite{zinbarg:apm:06} and \cite{rz:09}.)

One way to find $\omega_h$ is to do a factor analysis of the original data set, rotate the factors obliquely, factor that correlation matrix, do a Schmid-Leiman (\pfun{schmid}) transformation to find the general factor loadings, and then find $\omega_h$. $\omega_h$ differs slightly as a function of how the factors are estimated. Three options are available: the default does a minimum residual factor analysis, fm=``pa'' does a principal axes factor analysis (\pfun{factor.pa}), and fm=``mle'' provides a maximum likelihood solution.

For ability items, it is typically the case that all items will have positive loadings on the general factor. However, for non-cognitive items it is frequently the case that some items are to be scored positively, and some negatively. Although it is probably better to specify which direction the items are to be scored by specifying a key vector, if flip=TRUE (the default), items will be reversed so that they have positive loadings on the general factor. The keys are reported so that scores can be found using the \pfun{scoreItems} function. Arbitrarily reversing items this way can overestimate the general factor. (See the example with a simulated circumplex.)

The \pfun{omega} function uses exploratory factor analysis to estimate the $\omega_h$ coefficient. It is important to remember that ``A recommendation that should be heeded, regardless of the method chosen to estimate $\omega_h$, is to always examine the pattern of the estimated general factor loadings prior to estimating $\omega_h$.
Such an examination constitutes an informal test of the assumption that there is a latent variable common to all of the scale's indicators that can be conducted even in the context of EFA. If the loadings were salient for only a relatively small subset of the indicators, this would suggest that there is no true general factor underlying the covariance matrix. Just such an informal assumption test would have afforded a great deal of protection against the possibility of misinterpreting the misleading $\omega_h$ estimates occasionally produced in the simulations reported here.'' \citep[][p.~137]{zinbarg:apm:06}

Although $\omega_h$ is uniquely defined only for cases where 3 or more subfactors are extracted, it is sometimes desired to have a two factor solution. By default this is done by forcing the \pfun{schmid} extraction to treat the two subfactors as having equal loadings. There are three possible options for this condition: setting the general factor loadings of the two lower order factors to be ``equal'' (in which case they will be $\sqrt{r_{ab}}$, where $r_{ab}$ is the oblique correlation between the factors), or to ``first'' or ``second'' (in which case the general factor is equated with either the first or second group factor). A message is issued suggesting that the model is not really well defined. This solution is discussed in Zinbarg et al., 2007. To do this in \pfun{omega}, add option=``first'' or option=``second'' to the call.

Although obviously not meaningful for a 1 factor solution, it is of course possible to find the sum of the loadings on the first (and only) factor, square them, and compare them to the overall matrix variance. This is done, with appropriate complaints.

In addition to $\omega_h$, another of McDonald's coefficients is $\omega_t$. This is an estimate of the total reliability of a test. McDonald's $\omega_t$, which is similar to Guttman's $\lambda_6$ (see \pfun{guttman}), uses the estimates of uniqueness $u^2$ from factor analysis to find $e_j^2$. This is based on a decomposition of the variance of a test score, $V_x$, into four parts: that due to a general factor, $\vec{g}$, that due to a set of group factors, $\vec{f}$ (factors common to some but not all of the items), specific factors, $\vec{s}$, unique to each item, and random error, $\vec{e}$. (Because specific variance can not be distinguished from random error unless the test is given at least twice, some combine these both into error.)

Letting $\vec{x} = \vec{cg} + \vec{Af} + \vec{Ds} + \vec{e}$, then the communality of item$_j$, based upon general as well as group factors, is $h_j^2 = c_j^2 + \sum{f_{ij}^2}$, and the unique variance for the item, $u_j^2 = \sigma_j^2 (1-h_j^2)$, may be used to estimate the test reliability. That is, if $h_j^2$ is the communality of item$_j$, based upon general as well as group factors, then for standardized items, $e_j^2 = 1 - h_j^2$ and
$$ \omega_t = \frac{\vec{1}\vec{cc'}\vec{1}' + \vec{1}\vec{AA'}\vec{1}'}{V_x} = 1 - \frac{\sum(1-h_j^2)}{V_x} = 1 - \frac{\sum u^2}{V_x} $$
Because $h_j^2 \geq r_{smc}^2$, $\omega_t \geq \lambda_6$.

It is important to distinguish here between the two $\omega$ coefficients of McDonald, 1978 and Equation 6.20a of McDonald, 1999, $\omega_t$ and $\omega_h$. While the former is based upon the sum of squared loadings on all the factors, the latter is based upon the sum of the squared loadings on the general factor.
$$\omega_h = \frac{\vec{1}\vec{cc'}\vec{1}'}{V_x}$$

Another estimate reported is the omega for an infinite length test with a structure similar to the observed test:
$$\omega_{\infty} = \frac{\vec{1}\vec{cc'}\vec{1}'}{\vec{1}\vec{cc'}\vec{1}' + \vec{1}\vec{AA'}\vec{1}'}$$

In the case of simulated variables, it can be shown that the amount of variance attributable to a general factor ($\omega_h$) is quite large, and that the reliability of the set of items is somewhat greater than that estimated by $\alpha$ or $\lambda_6$.

\subsection{Yet another alternative: Coefficient $\beta$}

$\beta$, an alternative to $\omega_h$, is defined as the worst split-half reliability \citep{revelle:iclust}. It can be estimated by using \pfun{iclust} (Item Cluster analysis: a hierarchical clustering algorithm). For a very complimentary review of why the \pfun{iclust} algorithm is useful in scale construction, see \cite{cooksey:06}. For a discussion of how to use \pfun{iclust}, see the \href{http://cran.r-project.org/web/packages/psychTools/vignettes/factor.pdf}{factor analysis vignette}.

\subsection{Using the \pfun{omega} function}

This is \R{}. Just call it. For the next example, we find $\omega$ for a data set from Thurstone. To find it for your data, replace Thurstone with my.data.
~\
<<echo=TRUE>>=
omega(Thurstone)
@

\subsection{Find three measures of reliability: $\omega_h$, $\alpha$, and $\omega_t$}

In a review of various measures of reliability, \cite{rc:pa:19} suggest that one should routinely report three estimates of internal consistency ($\omega_h$, $\alpha$, and $\omega_t$). As an example, they use 10 items to measure anxiety, taken from the state anxiety data set (\pfun{sai} in the \Rpkg{psychTools} package). First examine the descriptive statistics and then find and summarize the $\omega$ estimates for these data. By inspection of the correlation matrix, it seems as if there are two group factors (tension and calmness) as well as an overall general factor of anxiety. We use a two factor solution to better represent the results (Figure~\ref{fig.anxiety}).
~\
<<echo=TRUE>>=
anxiety <- sai[c("anxious", "jittery", "nervous", "tense", "upset",
                 "at.ease", "calm", "confident", "content", "relaxed")]
describe(anxiety)
lowerCor(anxiety)
om <- omega(anxiety,2)  #specify a two factor solution
summary(om)             #summarize the output
@

<<echo=FALSE>>=
png('anxiety.png')
omega.diagram(om, main="Omega analysis of two factors of anxiety")
dev.off()
@

\begin{figure}[htbp]
\begin{center}
\includegraphics{anxiety}
\caption{An \pfun{omega} solution for 10 anxiety items with two group factors. See \cite{rc:pa:19} for more measures of reliability for this data set.}
\label{fig.anxiety}
\end{center}
\end{figure}

\subsection{Estimating $\omega_h$ using a direct Schmid-Leiman transformation}

The \pfun{omegaDirect} function uses Niels Waller's algorithm for finding a g factor directly, without extracting a higher order model \citep{waller:17}. This has the advantage that it will work cleanly for data with just 2 group factors. Unfortunately, it will produce non-zero estimates of $\omega_h$ even if there is no general factor.
~\
<<echo=TRUE>>=
om <- omegaDirect(Thurstone)
om
@

<<echo=FALSE>>=
png('direct.png')
omega.diagram(om, main="Direct Schmid-Leiman solution")
dev.off()
@

\begin{figure}[htbp]
\begin{center}
\includegraphics{direct}
\caption{The Direct Schmid-Leiman solution is taken from an algorithm by \cite{waller:17}. Compare this solution to Figure~\ref{fig:omega.9}.}
\label{fig:direct}
\end{center}
\end{figure}

\subsection{Estimating $\omega_h$ using Confirmatory Factor Analysis}

The \pfun{omegaSem} function will do an exploratory analysis and then take the highest loading items on each factor and do a confirmatory factor analysis using the \Rpkg{lavaan} package. These results can produce slightly different estimates of $\omega_h$, primarily because cross loadings are modeled as part of the general factor. We use a classic data set from Holzinger and Swineford, some of the tests of which are included in the \Rpkg{lavaan} package. This analysis allows us to examine the hierarchical structure of these ability tests. The data are taken from the \pfun{holzinger.swineford} data set in the \Rpkg{psychTools} package.
~\
<<echo=TRUE>>=
om <- omega(holzinger.swineford[8:31],4)    #the exploratory solution
omegaSem(holzinger.swineford[8:31],4)       #the confirmatory solution
@

\section{Simulating a hierarchical/higher order structure}

There are several simulation functions in the \Rpkg{psych} package for creating structures with a general factor. One, \pfun{sim.hierarchical}, creates lower level factors which are all correlated with a general factor. The default simulation has the parameters discussed by \cite{jensen:weng}. Another way to simulate a hierarchical structure is to simulate a bifactor model directly using the \pfun{sim.structure} function.
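Both simulation functions accept loadings of your own choosing. For \pfun{sim.hierarchical}, the general and group factor loadings may be specified directly (a minimal sketch; the \Robject{gload} and \Robject{fload} values shown here are illustrative choices, not necessarily the function's defaults):
~\
\begin{Rinput}
gload <- matrix(c(.9, .8, .7), nrow = 3)   #loadings of the 3 group factors on g
fload <- matrix(c(.8, .7, .6, rep(0, 9),   #loadings of the 9 variables on
                  .7, .6, .5, rep(0, 9),   #their group factors
                  .6, .5, .4), ncol = 3)
jen.r <- sim.hierarchical(gload = gload, fload = fload) #the implied correlation matrix
omega(jen.r)   #recover the simulated structure
\end{Rinput}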
The \cite{jensen:weng} model:
<<echo=TRUE>>=
jen <- sim.hierarchical()  #use the default values
om <- omega(jen)
om
@

\begin{figure}[htbp]
\begin{center}
\begin{scriptsize}
<<echo=FALSE>>=
png('jensen.png')
omega.diagram(om)
dev.off()
@
\end{scriptsize}
\includegraphics{jensen}
\caption{An example of a hierarchical model from Jensen.}
\label{fig:jensen}
\end{center}
\end{figure}

\subsection{Simulate a bifactor model}

Simulate a bifactor model and then compare two ways of finding the solution (\pfun{omega} and \pfun{omegaDirect}). We compare the solutions using the \pfun{fa.congruence} function.
\begin{Rinput}
fx <- matrix(c(.7,.6,.5,.7,.6,.5,.8,.7,.6,
               .6,.6,.6, rep(0,9),
               .6,.5,.6, rep(0,9),
               .6,.6,.6), ncol=4)
simx <- sim.structure(fx)
lowerMat(simx$model)
om <- omega(simx$model)
dsl <- omegaDirect(simx$model)
summary(om)
summary(dsl)
fa.congruence(list(om,dsl,fx))
\end{Rinput}

<<echo=FALSE>>=
fx <- matrix(c(.7,.6,.5,.7,.6,.5,.8,.7,.6,
               .6,.6,.6, rep(0,9),
               .6,.5,.6, rep(0,9),
               .6,.6,.6), ncol=4)
simx <- sim.structure(fx)
om <- omega(simx$model)
dsl <- omegaDirect(simx$model)
@

\begin{scriptsize}
<<echo=FALSE>>=
lowerMat(simx$model)
summary(om)
summary(dsl)
fa.congruence(list(om,dsl,fx))
@
\end{scriptsize}

\section{Summary}

In the modern era of computation, there is little justification for continuing with procedures that were developed as \href{https://personality-project.org/revelle/publications/cup.18.final.pdf}{shortcuts 80 years ago} \citep{reh:20}. To find $\omega_h$, $\alpha$, and $\omega_t$ is very easy using the open source statistical system \R{} and the \pfun{omega} functions in the \Rpkg{psych} package.

\section{System Info}

When running any \R{} package, it is useful to find out the session information to see if you have the most recent releases.
\begin{scriptsize}
<<echo=TRUE>>=
sessionInfo()
@
\end{scriptsize}

\newpage

\begin{thebibliography}{}

\bibitem[\protect\astroncite{Azzalini and Genz}{2016}]{mnormt}
Azzalini, A.
and Genz, A. (2016).
\newblock {\em The {R} package \texttt{mnormt}: The multivariate normal and $t$ distributions (version 1.5-5)}.

\bibitem[\protect\astroncite{Bernaards and Jennrich}{2005}]{GPA}
Bernaards, C. and Jennrich, R. (2005).
\newblock {Gradient projection algorithms and software for arbitrary rotation criteria in factor analysis}.
\newblock {\em Educational and Psychological Measurement}, 65(5):676--696.

\bibitem[\protect\astroncite{Cooksey and Soutar}{2006}]{cooksey:06}
Cooksey, R. and Soutar, G. (2006).
\newblock Coefficient beta and hierarchical item clustering - an analytical procedure for establishing and displaying the dimensionality and homogeneity of summated scales.
\newblock {\em Organizational Research Methods}, 9:78--98.

\bibitem[\protect\astroncite{Cronbach}{1951}]{cronbach:51}
Cronbach, L.~J. (1951).
\newblock Coefficient alpha and the internal structure of tests.
\newblock {\em Psychometrika}, 16:297--334.

\bibitem[\protect\astroncite{Fox et~al.}{2013}]{sem}
Fox, J., Nie, Z., and Byrnes, J. (2013).
\newblock {\em sem: Structural Equation Models}.
\newblock R package version 3.1-3.

\bibitem[\protect\astroncite{Guttman}{1945}]{guttman:45}
Guttman, L. (1945).
\newblock A basis for analyzing test-retest reliability.
\newblock {\em Psychometrika}, 10(4):255--282.

\bibitem[\protect\astroncite{Jensen and Weng}{1994}]{jensen:weng}
Jensen, A.~R. and Weng, L.-J. (1994).
\newblock What is a good g?
\newblock {\em Intelligence}, 18(3):231--258.

\bibitem[\protect\astroncite{Kuder and Richardson}{1937}]{kuder:37}
Kuder, G. and Richardson, M. (1937).
\newblock The theory of the estimation of test reliability.
\newblock {\em Psychometrika}, 2(3):151--160.

\bibitem[\protect\astroncite{McDonald}{1999}]{mcdonald:tt}
McDonald, R.~P. (1999).
\newblock {\em Test theory: {A} unified treatment}.
\newblock L. Erlbaum Associates, Mahwah, N.J.

\bibitem[\protect\astroncite{{R Core Team}}{2020}]{R}
{R Core Team} (2020).
\newblock {\em R: A Language and Environment for Statistical Computing}.
\newblock R Foundation for Statistical Computing, Vienna, Austria.

\bibitem[\protect\astroncite{Revelle}{1979}]{revelle:iclust}
Revelle, W. (1979).
\newblock Hierarchical cluster analysis and the internal structure of tests.
\newblock {\em Multivariate Behavioral Research}, 14(1):57--74.

\bibitem[\protect\astroncite{Revelle}{2020}]{psych}
Revelle, W. (2020).
\newblock {\em psych: Procedures for Personality and Psychological Research}.
\newblock Northwestern University, Evanston. https://CRAN.r-project.org/package=psych.
\newblock R package version 2.0.8.

\bibitem[\protect\astroncite{Revelle and Condon}{2018}]{rc:reliability}
Revelle, W. and Condon, D.~M. (2018).
\newblock Reliability.
\newblock In Irwing, P., Booth, T., and Hughes, D.~J., editors, {\em The {Wiley Handbook of Psychometric Testing:} A Multidisciplinary Reference on Survey, Scale and Test Development}. John Wiley \& Sons, London.

\bibitem[\protect\astroncite{Revelle and Condon}{2019}]{rc:pa:19}
Revelle, W. and Condon, D.~M. (2019).
\newblock Reliability from $\alpha$ to $\omega$: A tutorial.
\newblock {\em Psychological Assessment}, 31(12):1395--1411.

\bibitem[\protect\astroncite{Revelle et~al.}{2020}]{reh:20}
Revelle, W., Elleman, L.~G., and Hall, A. (2020).
\newblock Statistical analyses and computer programming in personality.
\newblock In Corr, P.~J., editor, {\em The {Cambridge University Press Handbook of Personality}}. {Cambridge University Press}.

\bibitem[\protect\astroncite{Revelle and Zinbarg}{2009}]{rz:09}
Revelle, W.
and Zinbarg, R.~E. (2009).
\newblock Coefficients alpha, beta, omega and the glb: comments on {Sijtsma}.
\newblock {\em Psychometrika}, 74(1):145--154.

\bibitem[\protect\astroncite{Rosseel}{2012}]{lavaan}
Rosseel, Y. (2012).
\newblock {lavaan}: An {R} package for structural equation modeling.
\newblock {\em Journal of Statistical Software}, 48(2):1--36.

\bibitem[\protect\astroncite{Waller}{2017}]{waller:17}
Waller, N.~G. (2017).
\newblock Direct {Schmid-Leiman} transformations and rank-deficient loadings matrices.
\newblock {\em Psychometrika}.

\bibitem[\protect\astroncite{Zinbarg et~al.}{2005}]{zinbarg:pm:05}
Zinbarg, R.~E., Revelle, W., Yovel, I., and Li, W. (2005).
\newblock Cronbach's {$\alpha$}, {Revelle's} {$\beta$}, and {McDonald's} {$\omega_H$}: Their relations with each other and two alternative conceptualizations of reliability.
\newblock {\em Psychometrika}, 70(1):123--133.

\bibitem[\protect\astroncite{Zinbarg et~al.}{2006}]{zinbarg:apm:06}
Zinbarg, R.~E., Yovel, I., Revelle, W., and McDonald, R.~P. (2006).
\newblock Estimating generalizability to a latent variable common to all of a scale's indicators: A comparison of estimators for {$\omega_h$}.
\newblock {\em Applied Psychological Measurement}, 30(2):121--144.

\end{thebibliography}

\end{document}