% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout mode to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: Fairly thin but still swallows some material at bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{Rules for Two-stage Proofs of Identifiability\footnote{See last slide for copyright information.}}
\subtitle{STA 2101F 2019}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Overview}
\tableofcontents
\end{frame}

\section{The Two-stage Idea}

\begin{frame}
\frametitle{The two-stage model: $cov(\mathbf{D}_i)=\boldsymbol{\Sigma}$}
% \framesubtitle{All variables are centered}
{\LARGE
\begin{eqnarray*}
\mathbf{Y}_i &=& \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\
\mathbf{F}_i &=& \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right) \\
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i \pause
\end{eqnarray*}
} % End size
\begin{itemize}
\item $\mathbf{X}_i$ is $p \times 1$, $\mathbf{Y}_i$ is $q \times 1$, $\mathbf{D}_i$ is $k \times 1$. \pause
\item $cov(\mathbf{X}_i)=\boldsymbol{\Phi}_x$, $cov(\boldsymbol{\epsilon}_i)=\boldsymbol{\Psi}$
\item $cov(\mathbf{F}_i) = cov\left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right)
 =\boldsymbol{\Phi} = \left( \begin{array}{c c}
 \boldsymbol{\Phi}_{11} & \boldsymbol{\Phi}_{12} \\
 \boldsymbol{\Phi}_{12}^\top & \boldsymbol{\Phi}_{22} \\
 \end{array} \right)$
\item $cov(\mathbf{e}_i)=\boldsymbol{\Omega}$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Identify parameter matrices in two steps}
\framesubtitle{It does not really matter which one you do first.}
\pause
%{\footnotesize
\begin{itemize}
\item $\mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$
\begin{itemize}
\item[] $cov(\mathbf{X}_i)=\boldsymbol{\Phi}_x$, $cov(\boldsymbol{\epsilon}_i)=\boldsymbol{\Psi}$
\end{itemize}
\pause
\item $\mathbf{D}_i = \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i$
\begin{itemize}
\item[] $cov(\mathbf{F}_i)=\boldsymbol{\Phi}$, $cov(\mathbf{e}_i)=\boldsymbol{\Omega}$
\end{itemize}
\end{itemize}
\pause
%} % End size
\vspace{3mm}
\hrule
\vspace{3mm}
%{\Large
\begin{enumerate}
\item \emph{Latent model}: Show $\boldsymbol{\beta}$, $\boldsymbol{\Gamma}$, $\boldsymbol{\Phi}_x$ and $\boldsymbol{\Psi}$ can be recovered from
$\boldsymbol{\Phi} = cov\left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right)$. \pause
\item \emph{Measurement model}: Show $\boldsymbol{\Phi}$ and $\boldsymbol{\Omega}$ can be recovered from $\boldsymbol{\Sigma}=cov(\mathbf{D}_i)$. \pause
\end{enumerate}
This means all the parameters can be recovered from $\boldsymbol{\Sigma}$.
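For step 2, assuming $cov(\mathbf{F}_i, \mathbf{e}_i) = \mathbf{0}$
(measurement errors independent of the factors), the measurement model gives
\begin{displaymath}
\boldsymbol{\Sigma} = cov(\boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i)
= \boldsymbol{\Lambda}\boldsymbol{\Phi}\boldsymbol{\Lambda}^\top + \boldsymbol{\Omega}.
\end{displaymath}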
%} % End size
\end{frame}

\begin{frame}
\frametitle{Parameter count rule}
\framesubtitle{A necessary condition overall and at each stage}
If a model has more parameters than covariance structure equations, the parameter vector can be identifiable on at most a set of volume zero in the parameter space.
% This applies to all models.
\end{frame}

\begin{frame}
\frametitle{All the following rules}
%\framesubtitle{}
\begin{itemize}
\item Are sufficient conditions for identifiability from the covariance matrix; they are not necessary conditions. \pause
\item Assume that errors are independent of exogenous variables that are not errors. \pause
\item Assume all variables have expected value zero, so these models have been re-parameterized by centering --- or by just ignoring intercepts and expected values.
\end{itemize}
\end{frame}

\section{Latent Model Rules}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Latent Model Rules}
\begin{itemize}
\item $\mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$ \pause
\item Here, identifiability means that the parameters $\boldsymbol{\beta}$, $\boldsymbol{\Gamma}$, $\boldsymbol{\Phi}_x$ and $\boldsymbol{\Psi}$ are functions of $cov(\mathbf{F}_i)=\boldsymbol{\Phi}$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Regression Rule}
\framesubtitle{Sometimes called the Null Beta Rule}
\pause
Suppose
\begin{itemize}
\item No endogenous variables influence other endogenous variables. \pause
\item[]
\item $\mathbf{Y}_i = \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$ \pause
\item Of course $cov(\mathbf{X}_i, \boldsymbol{\epsilon}_i) = \mathbf{0}$, always. \pause
\item $\boldsymbol{\Psi} = cov(\boldsymbol{\epsilon}_i)$ need not be diagonal. \pause
\end{itemize}
\vspace{5mm}
Then $\boldsymbol{\Gamma}$ and $\boldsymbol{\Psi}$ are identifiable.
\end{frame}

\begin{frame}
\frametitle{Acyclic Rule}
\framesubtitle{Note that each endogenous variable is influenced by exactly one error term, and by at least one other variable.}
\pause
Parameters of the Latent Variable Model are identifiable if the model is acyclic \pause
(no feedback loops through straight arrows) \pause
and the following conditions hold. \pause
\begin{itemize}
\item Organize the variables that are not error terms into sets. Set 0 consists of all the exogenous variables. \pause
\item For $j=1,\ldots ,k$, each endogenous variable in set $j$ is influenced by at least one variable in set $j-1$, and also possibly by variables in earlier sets. \pause
\item Error terms may be correlated within sets, but not between sets. \pause
\end{itemize}
\vspace{5mm}
Proof: Repeated application of the Regression Rule (see the Sage sketch after the examples).
\end{frame}

\begin{frame}
\frametitle{An Acyclic model}
%\framesubtitle{}
\begin{center}
\includegraphics[width=4.5in]{AcyclicHand}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Parameters of this model are just identifiable}
%\framesubtitle{Example from Ch. 5 of Duncan's \emph{Introduction to Structural Equation Models}}
\begin{center}
\includegraphics[width=4in]{Duncan-Just-Ident}
\end{center}
Shows that the acyclic rule is sufficient but not necessary.
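\end{frame}

\begin{frame}[fragile]
\frametitle{One application of the Regression Rule in Sage}
A minimal sketch on a hypothetical chain $X \rightarrow Y_1 \rightarrow Y_2$,
with Set 0 $= \{X\}$, Set 1 $= \{Y_1\}$ and Set 2 $= \{Y_2\}$. Only standard
Sage symbolics are assumed, not \texttt{sem.sage}; the variable names are
illustrative.
{\footnotesize
\begin{verbatim}
# Hypothetical model: Y1 = gamma1*X + epsilon1, Y2 = beta2*Y1 + epsilon2
var('gamma1 beta2 phix psi1 psi2')
phi12 = gamma1*phix              # Cov(X, Y1)
phi22 = gamma1^2*phix + psi1     # Var(Y1)
phi23 = beta2*phi22              # Cov(Y1, Y2)
phi33 = beta2^2*phi22 + psi2     # Var(Y2)
# Regression Rule on Set 1 given Set 0, then Set 2 given Set 1:
show( (phi12/phix).simplify_full() )              # recovers gamma1
show( (phi22 - phi12^2/phix).simplify_full() )    # recovers psi1
show( (phi23/phi22).simplify_full() )             # recovers beta2
show( (phi33 - phi23^2/phi22).simplify_full() )   # recovers psi2
\end{verbatim}
} % End size
Each pair of lines applies $\boldsymbol{\Gamma} = \boldsymbol{\Phi}_{12}^\top\boldsymbol{\Phi}_x^{-1}$
and $\boldsymbol{\Psi} = \boldsymbol{\Phi}_{22} - \boldsymbol{\Gamma}\boldsymbol{\Phi}_x\boldsymbol{\Gamma}^\top$ to one set at a time.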
\end{frame}

\begin{frame}
\frametitle{The Pinwheel Model}
\framesubtitle{Parameters are identifiable}
\begin{center}
\includegraphics[width=2.5in]{Pinwheel}
\end{center}
\end{frame}

\begin{frame}
\frametitle{Model equations for the 3-node Pinwheel Model}
\framesubtitle{Assume all variances positive etc.}
\begin{eqnarray*}
Y_1 &=& \beta_3 Y_3 + \gamma X + \epsilon_1 \\
Y_2 &=& \beta_1 Y_1 + \epsilon_2 \\
Y_3 &=& \beta_2 Y_2 + \epsilon_3
\end{eqnarray*}
In matrix terms:
\begin{displaymath}
\left(\begin{array}{c} Y_1 \\ Y_2 \\ Y_3 \end{array} \right) =
\left(\begin{array}{rrr}
0 & 0 & \beta_{3} \\
\beta_{1} & 0 & 0 \\
0 & \beta_{2} & 0
\end{array}\right)
\left(\begin{array}{c} Y_1 \\ Y_2 \\ Y_3 \end{array} \right) +
\left(\begin{array}{c} \gamma \\ 0 \\ 0 \end{array} \right) X +
\left(\begin{array}{c} \epsilon_1 \\ \epsilon_2 \\ \epsilon_3 \end{array} \right)
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{To get $cov(\mathbf{Y})$}
\begin{eqnarray*}
& & \mathbf{Y} = \boldsymbol{\beta} \mathbf{Y} + \boldsymbol{\Gamma} \mathbf{X} + \boldsymbol{\epsilon} \\
&\Rightarrow& \mathbf{Y} - \boldsymbol{\beta} \mathbf{Y} = \boldsymbol{\Gamma} \mathbf{X} + \boldsymbol{\epsilon} \\
&\Rightarrow& \mathbf{IY} - \boldsymbol{\beta} \mathbf{Y} = \boldsymbol{\Gamma} \mathbf{X} + \boldsymbol{\epsilon} \\
&\Rightarrow& (\mathbf{I} - \boldsymbol{\beta} )\mathbf{Y} = \boldsymbol{\Gamma} \mathbf{X} + \boldsymbol{\epsilon}
\end{eqnarray*}
\vspace{5mm}
$(\mathbf{I} - \boldsymbol{\beta})^{-1}$ exists when $|\mathbf{I} - \boldsymbol{\beta}|\neq 0$
\begin{displaymath}
\mathbf{I} - \boldsymbol{\beta} =
\left(\begin{array}{rrr}
1 & 0 & -\beta_{3} \\
-\beta_{1} & 1 & 0 \\
0 & -\beta_{2} & 1
\end{array}\right)
\end{displaymath}
\end{frame}

\begin{frame}[fragile]
\frametitle{Calculate the determinant using Sage}
%\framesubtitle{}
{\footnotesize % or scriptsize
\begin{verbatim}
# Load utility functions, including ZeroMatrix and IdentityMatrix
sem = 'http://www.utstat.toronto.edu/~brunner/openSEM/sage/sem.sage'
load(sem)
# Build the beta matrix of the pinwheel model
B = ZeroMatrix(3,3)
B[0,2] = var('beta3'); B[1,0] = var('beta1'); B[2,1] = var('beta2')
# Display the determinant of I - beta
ImB = IdentityMatrix(3)-B
show( ImB.determinant() )
\end{verbatim}
} % End size
\begin{displaymath}
-\beta_{1} \beta_{2} \beta_{3} + 1
\end{displaymath}
\vspace{3mm}
So the inverse will exist unless $\beta_{1} \beta_{2} \beta_{3} = 1$.
\end{frame}

\begin{frame}
\frametitle{Solve for $Y_3$}
Starting with the model equations
\begin{eqnarray*}
Y_1 &=& \beta_3 Y_3 + \gamma X + \epsilon_1 \\
Y_2 &=& \beta_1 Y_1 + \epsilon_2 \\
Y_3 &=& \beta_2 Y_2 + \epsilon_3
\end{eqnarray*}
\vspace{3mm}
substitute the second equation into the third, and then the first equation into the result:
\begin{eqnarray*}
&& Y_3 = \beta_1\beta_2\beta_3Y_3 + \beta_1\beta_2\gamma X + \beta_1\beta_2\epsilon_1 + \beta_2\epsilon_2 + \epsilon_3 \\
& \Rightarrow & Y_3(1-\beta_1\beta_2\beta_3) = \beta_1\beta_2\gamma X + \beta_1\beta_2\epsilon_1 + \beta_2\epsilon_2 + \epsilon_3 \\
\end{eqnarray*}
What happens if $(\mathbf{I} - \boldsymbol{\beta})^{-1}$ does not exist (and $\gamma \neq 0$)?
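\end{frame}

\begin{frame}[fragile]
\frametitle{Checking the solution with Sage}
A quick check of the same algebra, using only Sage's built-in \texttt{solve};
\texttt{gam} stands in for $\gamma$ to avoid clashing with Sage's built-in
\texttt{gamma} function.
{\footnotesize
\begin{verbatim}
var('Y1 Y2 Y3 X gam beta1 beta2 beta3 eps1 eps2 eps3')
eq1 = Y1 == beta3*Y3 + gam*X + eps1
eq2 = Y2 == beta1*Y1 + eps2
eq3 = Y3 == beta2*Y2 + eps3
# Solve the three simultaneous equations for Y1, Y2 and Y3
show( solve([eq1, eq2, eq3], Y1, Y2, Y3) )
\end{verbatim}
} % End size
Every denominator in the solution is a multiple of $\beta_1\beta_2\beta_3 - 1$,
which vanishes exactly when the determinant of $\mathbf{I}-\boldsymbol{\beta}$ is zero.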
\end{frame}

\begin{frame}
\frametitle{If $\beta_{1} \beta_{2} \beta_{3} = 1$}
\framesubtitle{Meaning that $(\mathbf{I} - \boldsymbol{\beta})^{-1}$ does not exist}
\begin{eqnarray*}
&& Y_3(1-\beta_1\beta_2\beta_3) = \beta_1\beta_2\gamma X + \beta_1\beta_2\epsilon_1 + \beta_2\epsilon_2 + \epsilon_3 \\
& \Rightarrow & 0 = \beta_1\beta_2\gamma X + \beta_1\beta_2\epsilon_1 + \beta_2\epsilon_2 + \epsilon_3 \\
& \Rightarrow & E(X\cdot 0) = E\left(X (\beta_1\beta_2\gamma X + \beta_1\beta_2\epsilon_1 + \beta_2\epsilon_2 + \epsilon_3)\right) \\
& \Rightarrow & 0 = \beta_1\beta_2\gamma E(X^2) + 0 \\
& \Rightarrow & \beta_1\beta_2\gamma \phi = 0
\end{eqnarray*}
with $\beta_1, \beta_2, \gamma$ and $\phi$ all non-zero.
\vspace{3mm}

So $\beta_{1} \beta_{2} \beta_{3} = 1$ contradicts the model.
\end{frame}

\begin{frame}
\frametitle{Under the assumptions of the pinwheel model}
\begin{itemize}
\item $(\mathbf{I} - \boldsymbol{\beta})^{-1}$ exists.
\item $\beta_{1} \beta_{2} \beta_{3} \neq 1$.
\item The surface $\beta_{1} \beta_{2} \beta_{3} = 1$ forms a \emph{hole} in the parameter space.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Covariance matrix of the factors: $\boldsymbol{\Phi}$}
\framesubtitle{Factors are $X$, $Y_1$, $Y_2$, $Y_3$}
\begin{columns} % Use Beamer's columns to use more of the margins!
\column{1.2\textwidth}
{\scriptsize
\begin{displaymath}
\left(\begin{array}{cccc}
\phi & -\frac{\gamma \phi}{\beta_{1} \beta_{2} \beta_{3} - 1} & -\frac{\beta_{1} \gamma \phi}{\beta_{1} \beta_{2} \beta_{3} - 1} & -\frac{\beta_{1} \beta_{2} \gamma \phi}{\beta_{1} \beta_{2} \beta_{3} - 1} \\
 & \frac{\beta_{2}^{2} \beta_{3}^{2} \psi_{2} + \beta_{3}^{2} \psi_{3} + \gamma^{2} \phi + \psi_{1}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}} & \frac{\beta_{1} \beta_{3}^{2} \psi_{3} + \beta_{1} \gamma^{2} \phi + \beta_{2} \beta_{3} \psi_{2} + \beta_{1} \psi_{1}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}} & \frac{\beta_{1} \beta_{2} \gamma^{2} \phi + \beta_{2}^{2} \beta_{3} \psi_{2} + \beta_{1} \beta_{2} \psi_{1} + \beta_{3} \psi_{3}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}} \\
 & & \frac{\beta_{1}^{2} \beta_{3}^{2} \psi_{3} + \beta_{1}^{2} \gamma^{2} \phi + \beta_{1}^{2} \psi_{1} + \psi_{2}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}} & \frac{\beta_{1}^{2} \beta_{2} \gamma^{2} \phi + \beta_{1}^{2} \beta_{2} \psi_{1} + \beta_{1} \beta_{3} \psi_{3} + \beta_{2} \psi_{2}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}} \\
 & & & \frac{\beta_{1}^{2} \beta_{2}^{2} \gamma^{2} \phi + \beta_{1}^{2} \beta_{2}^{2} \psi_{1} + \beta_{2}^{2} \psi_{2} + \psi_{3}}{{\left(\beta_{1} \beta_{2} \beta_{3} - 1\right)}^{2}}
\end{array}\right)
\end{displaymath}
} % End size
\end{columns}
\vspace{5mm}
\begin{itemize}
\item $\phi=\phi_{11}$, $\beta_1 = \frac{\phi_{13}}{\phi_{12}}$ and $\beta_2 = \frac{\phi_{14}}{\phi_{13}}$ are easy.
\item But then?
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Solutions exist provided $\beta_1, \beta_2, \beta_3$ and $\gamma$ are all non-zero.}
\framesubtitle{Using Sage \ldots}
\begin{eqnarray*}
\beta_{3} &=& \frac{\phi_{12} \phi_{13} \phi_{23} - \phi_{13}^{2} \phi_{22}}{\phi_{12} \phi_{14} \phi_{33} - \phi_{13} \phi_{14} \phi_{23}} \\
 && \\
\gamma &=& \frac{\phi_{12}^{2} \phi_{33} - 2 \, \phi_{12} \phi_{13} \phi_{23} + \phi_{13}^{2} \phi_{22}}{\phi_{11} \phi_{12} \phi_{33} - \phi_{11} \phi_{13} \phi_{23}} \\
 && \\
\psi_{3} &=& \frac{{\left(\phi_{13} \phi_{44} - \phi_{14} \phi_{34}\right)} {\left(\phi_{12}^{2} \phi_{33} - 2 \, \phi_{12} \phi_{13} \phi_{23} + \phi_{13}^{2} \phi_{22}\right)}}{{\left(\phi_{12} \phi_{33} - \phi_{13} \phi_{23}\right)} \phi_{12} \phi_{13}} \\
 && \\
\psi_{2} &=& \frac{\phi_{12}^{2} \phi_{33} - 2 \, \phi_{12} \phi_{13} \phi_{23} + \phi_{13}^{2} \phi_{22}}{\phi_{12}^{2}} \\
 && \\
\psi_1 & = & \beta_{1}^{2} \beta_{2}^{2} \beta_{3}^{2} \phi_{22} - \beta_{2}^{2} \beta_{3}^{2} \psi_{2} - 2 \, \beta_{1} \beta_{2} \beta_{3} \phi_{22} - \beta_{3}^{2} \psi_{3} - \gamma^{2} \phi + \phi_{22}
\end{eqnarray*}
\end{frame}

\begin{frame}
\frametitle{Parameters of this pinwheel model are identifiable}
%\framesubtitle{}
\begin{itemize}
\item Even though it does not fit any known rules,
\item And the proof is very difficult.
\end{itemize}
\end{frame}

\section{Measurement Model Rules}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{frame}
\frametitle{Measurement Model Rules}
\framesubtitle{Factor Analysis}
\pause
\begin{itemize}
\item In these rules, latent variables that are not error terms are called ``factors.'' \pause
\item Unless otherwise noted, factors may have non-zero covariances with each other. \pause
\item All the models are surrogate models.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Double Measurement Rule}
\framesubtitle{This has been proved}
\pause
Model parameters are identifiable provided \pause
\vspace{5mm}
\begin{itemize}
\item Each factor is measured twice. \pause
\item All factor loadings equal one. \pause
\item There are two sets of measurements, set one and set two. \pause
\item Correlated measurement errors are allowed within sets of measurements, but not between sets.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Three-Variable Rule for Standardized Factors}
\framesubtitle{This has been proved}
\pause
Model parameters are identifiable provided \pause
\vspace{5mm}
\begin{itemize}
\item Errors are independent of one another. \pause
\item Each observed variable is influenced by only one factor. \pause
\item The variance of each factor equals one. \pause
\item There are at least 3 observed variables with non-zero loadings per factor. \pause
\item The sign of one non-zero loading is known for each factor.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Three-Variable Rule for Unstandardized Factors}
\framesubtitle{This has been proved}
\pause
Model parameters are identifiable provided \pause
\vspace{5mm}
\begin{itemize}
\item Errors are independent of one another. \pause
\item Each observed variable is influenced by only one factor. \pause
\item For each factor, at least one factor loading equals one. \pause
\item There are at least 2 additional observed variables with non-zero loadings per factor.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Two-Variable Rule for Standardized Factors} \pause
%\framesubtitle{}
A factor with just two observed variables may be added to a measurement model whose parameters are identifiable, and the parameters of the combined model will be identifiable provided \pause
\begin{itemize}
\item The errors for the two additional observed variables are independent of one another and of those already in the model. \pause
\item The two new observed variables are influenced only by the new factor. \pause
\item The variance of the new factor equals one. \pause
\item Both new factor loadings are non-zero. \pause
\item The sign of one new loading is known. \pause
\item The new factor has a non-zero covariance with at least one factor already in the model.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Two-Variable Rule for Unstandardized Factors} \pause
%\framesubtitle{}
A factor with just two observed variables may be added to a measurement model whose parameters are identifiable, and the parameters of the combined model will be identifiable provided \pause
\begin{itemize}
\item The errors for the two additional observed variables are independent of one another and of those already in the model. \pause
\item The two new observed variables are influenced only by the new factor. \pause
\item At least one new factor loading equals one. \pause
\item The other new factor loading is non-zero. \pause
\item The new factor has a non-zero covariance with at least one factor already in the model.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Four-variable Two-factor Rule} \pause
%\framesubtitle{}
The parameters of a measurement model with two factors and four observed variables will be identifiable provided \pause
\begin{itemize}
\item All errors are independent of one another. \pause
\item Each observed variable is influenced by only one factor. \pause
\item Two observed variables are influenced by one factor, and two are influenced by the other. \pause
\item All factor loadings are non-zero. \pause
\item For each factor, either the variance of the factor equals one and the sign of one of its loadings is known, or at least one factor loading equals one. \pause
\item The covariance of the two factors does not equal zero.
\end{itemize}
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Proof of the Four-variable Two-factor Rule}
\framesubtitle{With standardized factors}
\pause
The model equations are
\begin{eqnarray*}
D_1 & = & \lambda_1 F_1 + e_1 \\
D_2 & = & \lambda_2 F_1 + e_2 \\
D_3 & = & \lambda_3 F_2 + e_3 \\
D_4 & = & \lambda_4 F_2 + e_4,
\end{eqnarray*}
\pause
where all expected values are zero, $Var(e_j)=\omega_j$ for $j=1, \ldots, 4$, and \pause
\begin{displaymath}
\begin{array}{ccc} % Array of Arrays: Nice display of matrices.
cov\left(\begin{array}{c} F_1 \\ F_2 \end{array} \right) & = &
\left( \begin{array}{c c} 1 & \phi_{12} \\ \phi_{12} & 1 \end{array} \right)
\end{array}
\end{displaymath}
\pause
with $\phi_{12}\neq 0$. \pause
Also suppose $\lambda_1>0$, $\lambda_2 \neq 0$, $\lambda_3>0$ and $\lambda_4 \neq 0$.
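\end{frame}

\begin{frame}[fragile]
\frametitle{Computing $\boldsymbol{\Sigma}$ with Sage}
A sketch that reproduces the covariance matrix on the next slide from
$\boldsymbol{\Sigma} = \boldsymbol{\Lambda}\boldsymbol{\Phi}\boldsymbol{\Lambda}^\top + \boldsymbol{\Omega}$;
only standard Sage matrix algebra is assumed, not \texttt{sem.sage}.
{\footnotesize
\begin{verbatim}
var('lambda1 lambda2 lambda3 lambda4 phi12')
var('omega1 omega2 omega3 omega4')
L = matrix(SR, [[lambda1, 0], [lambda2, 0],
                [0, lambda3], [0, lambda4]])    # Factor loadings
Phi = matrix(SR, [[1, phi12], [phi12, 1]])      # cov(F), standardized
Omega = diagonal_matrix([omega1, omega2, omega3, omega4])
Sigma = L * Phi * L.transpose() + Omega         # cov(D)
show(Sigma)
\end{verbatim}
} % End size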
\end{frame}

\begin{frame}
\frametitle{Covariance matrix}
\framesubtitle{For the 4-variable 2-factor problem}
\pause
{\footnotesize
\begin{eqnarray*}
D_1 & = & \lambda_1 F_1 + e_1 \\
D_2 & = & \lambda_2 F_1 + e_2 \\
D_3 & = & \lambda_3 F_2 + e_3 \\
D_4 & = & \lambda_4 F_2 + e_4,
\end{eqnarray*}
\pause
} % End size
\begin{displaymath}
\boldsymbol{\Sigma} ~~~=~~~
\begin{array}{c|cccc}
 & D_1 & D_2 & D_3 & D_4 \\ \hline
D_1 & \lambda_1^2+\omega_1 &\lambda_1\lambda_2 & \lambda_1\lambda_3\phi_{12} & \lambda_1\lambda_4 \phi_{12} \\
D_2 & & \lambda_2^2+\omega_2 & \lambda_2\lambda_3 \phi_{12} & \lambda_2\lambda_4 \phi_{12} \\
D_3 & & & \lambda_3^2+\omega_3 & \lambda_3\lambda_4 \\
D_4 & & & & \lambda_4^2+\omega_4
\end{array}
\end{displaymath}
\end{frame}

\begin{frame}
\frametitle{Using the assumption that $\lambda_1>0$ and $\lambda_3>0$} \pause
%\framesubtitle{}
{\footnotesize
\begin{displaymath}
\boldsymbol{\Sigma} ~~~=~~~
\begin{array}{c|cccc}
 & D_1 & D_2 & D_3 & D_4 \\ \hline
D_1 & \lambda_1^2+\omega_1 &\lambda_1\lambda_2 & \lambda_1\lambda_3\phi_{12} & \lambda_1\lambda_4 \phi_{12} \\
D_2 & & \lambda_2^2+\omega_2 & \lambda_2\lambda_3 \phi_{12} & \lambda_2\lambda_4 \phi_{12} \\
D_3 & & & \lambda_3^2+\omega_3 & \lambda_3\lambda_4 \\
D_4 & & & & \lambda_4^2+\omega_4
\end{array}
\end{displaymath}
\pause
} % End size
\begin{eqnarray*}
& & \frac{\sigma_{12}\sigma_{13}}{\sigma_{23}} = \frac{\lambda_1^2\lambda_2\lambda_3\phi_{12}}{\lambda_2\lambda_3 \phi_{12}} = \pause \lambda_1^2 \\ \pause
&& \\
& \Rightarrow & \lambda_1 = \sqrt{\frac{\sigma_{12}\sigma_{13}}{\sigma_{23}}} \pause
\end{eqnarray*}
Similarly, $ \lambda_3 = \sqrt{\frac{\sigma_{34}\sigma_{23}}{\sigma_{24}}}$, \pause and the rest is easy.
\end{frame}

\begin{frame}
\frametitle{Please don't do both!}
\framesubtitle{Don't set the variance \emph{and} a factor loading to one!}
\pause
\begin{itemize}
\item Setting the variance of factors to one looks arbitrary, but it's really a smart re-parameterization. \pause
\item Setting one loading per factor to one also is a smart re-parameterization. \pause
\item It's smart because the resulting models impose the \emph{same restrictions on the covariance matrix that the original model does.} \pause
\item And, the \emph{meanings} of the parameters have a clear connection to the meanings of the parameters of the original model. \pause
\item But if you do both, it's a mess. Most or all of the meaning is lost. \pause
\item And you put an \emph{extra} restriction on $\boldsymbol{\Sigma}$ that is not implied by the original model.
\end{itemize}
% $\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Combination Rule} \pause
%\framesubtitle{}
Suppose that the parameters of two measurement models are identifiable by any of the rules above. \pause
The two models may be combined into a single model provided that the error terms of the first model are independent of the error terms in the second model. \pause
The additional parameters of the combined model are the covariances between the two sets of factors, \pause
and these are all identifiable.
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Cross-over Rule}
\framesubtitle{This has been proved}
\pause
Suppose that
\begin{itemize}
\item The parameters of a measurement model are identifiable, and \pause
\item For each factor there is at least one observed variable that is influenced only by that factor (with a non-zero factor loading).
\pause
\end{itemize}
Then any number of new observed variables may be added to the model and the result is a model whose parameters are all identifiable, provided that \pause
\begin{itemize}
\item The error terms associated with the new observed variables are independent of the error terms in the existing model. \pause
\end{itemize}
Each new observed variable may be influenced by any or all of the factors, potentially resulting in a cross-over pattern in the path diagram. \pause
The error terms associated with the new set of observed variables may be correlated with one another.
Note that no new factors are added.
$\blacksquare$
\end{frame}

\begin{frame}
\frametitle{Error-Free Rule}
\framesubtitle{This has been proved}
\pause
A vector of observed variables may be added to the factors of a measurement model whose parameters are identifiable. \pause
Suppose that \pause
\begin{itemize}
\item The new observed variables are independent of the errors in the measurement model, and \pause
\item For each factor in the measurement model there is at least one observed variable that is influenced only by that factor (with a non-zero factor loading). \pause
\end{itemize}
Then the parameters of the new measurement model, in which some of the observed variables are assumed to be measured without error, are identifiable. \pause
The practical consequence is that variables assumed to be measured without error may be included in the latent component of a structural equation model, provided that the measurement model for the other variables has identifiable parameters.
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a
\href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}
{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:
\href{http://www.utstat.toronto.edu/~brunner/oldclass/2101f19}
{\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/2101f19}}

\end{frame}

\end{document}