% Some material is taken from 2015 slide set 15 (Powerpoint)
% \documentclass[serif]{beamer} % Serif for Computer Modern math font.
\documentclass[serif, handout]{beamer} % Handout mode to ignore pause statements
\hypersetup{colorlinks,linkcolor=,urlcolor=red}
\usefonttheme{serif} % Looks like Computer Modern for non-math text -- nice!
\setbeamertemplate{navigation symbols}{} % Suppress navigation symbols
% \usetheme{Berlin} % Displays sections on top
\usetheme{Frankfurt} % Displays section titles on top: fairly thin, but still swallows some material at the bottom of crowded slides
%\usetheme{Berkeley}
\usepackage[english]{babel}
\usepackage{amsmath} % for binom
% \usepackage{graphicx} % To include pdf files!
% \definecolor{links}{HTML}{2A1B81}
% \definecolor{links}{red}
\setbeamertemplate{footline}[frame number]
\mode<presentation>

\title{The General Structural Equation Model\footnote{See last slide for copyright information.}}
\subtitle{STA2101 F19}
\date{} % To suppress date

\begin{document}

\begin{frame}
\titlepage
\end{frame}

\begin{frame}
\frametitle{Features of Structural Equation Models}
%\framesubtitle{}
\begin{itemize}
\item Multiple equations.
\item All the variables are random.
\item An explanatory variable in one equation can be the response variable in another equation.
\item Models are represented by path diagrams.
\item Identifiability is always an issue. \pause
\item The statistical models are explicitly models of influence. They are often called \emph{causal models}.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Correlation versus Causation}
%\framesubtitle{}
\begin{itemize}
\item The path diagrams deliberately imply influence. If $A \rightarrow B$, we are saying $A$ \emph{contributes} to $B$, or partly \emph{causes} it.
\item Data are usually observational. The correlation-causation issue does not go away.
\item You may be able to argue on theoretical grounds that $A \rightarrow B$ is more believable than $B \rightarrow A$.
\item A statistical model cannot ``prove'' causality, but if you have a causal model, you may be able to test whether it is compatible with the data.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Vocabulary}
%\framesubtitle{}
{\small
\begin{itemize}
\item \textbf{Exogenous variable}: In the regression-like equations of a structural equation model, the exogenous variables are the ones that appear \emph{only} on the right side of the equals sign, and never on the left side in any equation. If you think of $Y$ as a function of $X$, this is one way to remember the meaning of \textbf{ex}ogenous. \pause
% All error terms are exogenous variables.
\item \textbf{Endogenous variable}: Endogenous variables are those that appear on the left side of at least one equals sign. Endogenous variables depend on the exogenous variables, and possibly on other endogenous variables. Think of an arrow from an exogenous variable to an endogenous variable. The \textbf{end} of the arrow points at the \textbf{end}ogenous variable. \pause
\item \textbf{Factor}: This term has a meaning that conflicts with its meaning in mainstream Statistics, particularly in experimental design. Here, a \emph{factor} is an underlying trait or characteristic that cannot be measured directly, like intelligence. It is a latent variable, period.
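\pause
\item For example, in the model on the next slide, $X_{i,1}$, $X_{i,2}$ and the error terms are exogenous, while $Y_{i,1}$ and $Y_{i,2}$ are endogenous. $Y_{i,1}$ appears on the left side of one equation and on the right side of another; it is still endogenous.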
\end{itemize}
} % End size
\end{frame}

\begin{frame}
\frametitle{Notation} % Modest changes in notation
%\framesubtitle{}
\begin{eqnarray*}
Y_{i,1} &=& \alpha_1 + \gamma_1 X_{i,1} + \gamma_2 X_{i,2} + \epsilon_{i,1} \\
Y_{i,2} &=& \alpha_2 + \beta Y_{i,1} + \epsilon_{i,2}
\end{eqnarray*} \pause
\begin{itemize}
\item Regression coefficients (links between exogenous variables and endogenous variables) are now called gamma instead of beta. \pause
\item Betas are used for links between endogenous variables. \pause
\item Intercepts are alphas, but they will soon disappear.
% \pause
% \item We feel free to drop the subscript $i$ if we want to; implicitly, everything is independent and identically distributed for $i=1, \ldots, n$.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Losing the intercepts and expected values} \pause
%\framesubtitle{}
\begin{itemize}
\item Mostly the intercepts and expected values are not identifiable anyway, as in multiple regression with measurement error. \pause
\item We have a chance to identify a \emph{function} of the parameter vector: the parameters that appear in the covariance matrix $\boldsymbol{\Sigma}$ of an observable data vector. \pause $\boldsymbol{\Sigma} = cov(\mathbf{D}_i)$. \pause
\item Denote the vector of parameters that appear in $\boldsymbol{\Sigma}$ by $\boldsymbol{\theta}$. \pause
\item Re-parameterize. The new parameter vector is $(\boldsymbol{\theta},\boldsymbol{\mu})$, where $\boldsymbol{\mu} = E(\mathbf{D}_i)$. \pause
\item Estimate $\boldsymbol{\mu}$ with $\overline{\mathbf{D}}$, forget it, and concentrate on $\boldsymbol{\theta}$. \pause
\item From this point on, the models \emph{seemingly} have zero means and no intercepts.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{A General Two-Stage Model}
\framesubtitle{Stage 1 is the latent variable model and Stage 2 is the measurement model.} \pause
\begin{eqnarray*}
\mathbf{Y}_i &=& \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
\mathbf{F}_i &=& \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right) \\ \pause
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i
\end{eqnarray*} \pause
\begin{itemize}
\item $\mathbf{D}_i$ (the data) are observable. All other variables are latent. \pause
\item $\mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$ is called the \emph{Latent Variable Model}. \pause
\item The latent vectors $\mathbf{X}_i$ and $\mathbf{Y}_i$ are collected into a \emph{factor} $\mathbf{F}_i$. \pause This is \emph{not} a categorical explanatory variable, the usual meaning of ``factor'' in experimental design. \pause
\item $\mathbf{D}_i = \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i$ is called the \emph{Measurement Model}.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{$\mathbf{Y}_i= \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$ ~~ $\mathbf{F}_i = \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right)$ ~~ $\mathbf{D}_i = \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i$} \pause
% \framesubtitle{}
\begin{itemize}
\item $\mathbf{Y}_i$ is a $q \times 1$ random vector. \pause
\item $\boldsymbol{\beta}$ is a $q \times q$ matrix of constants with zeros on the main diagonal. \pause
\item $\mathbf{X}_i$ is a $p \times 1$ random vector. \pause
\item $\boldsymbol{\Gamma}$ is a $q \times p$ matrix of constants.
\pause
\item $\boldsymbol{\epsilon}_i$ is a $q \times 1$ random vector. \pause
\item $\mathbf{F}_i$ ($F$ for Factor) is just $\mathbf{X}_i$ stacked on top of $\mathbf{Y}_i$. It is a $(p+q) \times 1$ random vector. \pause
\item $\mathbf{D}_i$ is a $k \times 1$ random vector. \pause Sometimes, $\mathbf{D}_i = \left( \begin{array}{c} \mathbf{W}_i \\ \mathbf{V}_i \end{array} \right)$, with $\mathbf{W}_i$ measuring $\mathbf{X}_i$ and $\mathbf{V}_i$ measuring $\mathbf{Y}_i$. \pause
\item $\boldsymbol{\Lambda}$ is a $k \times (p+q)$ matrix of constants: ``factor loadings.'' \pause
\item $\mathbf{e}_i$ is a $k \times 1$ random vector. \pause
\item $\mathbf{X}_i$, $\boldsymbol{\epsilon}_i$ and $\mathbf{e}_i$ are independent.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Covariance matrices}
\framesubtitle{All assumed positive definite unless otherwise specified} \pause
\begin{eqnarray*}
\mathbf{Y}_i &=& \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\
\mathbf{F}_i &=& \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right) \\
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i \\
&&\\ \pause
cov(\mathbf{X}_i) &=& \boldsymbol{\Phi}_x \\
cov(\boldsymbol{\epsilon}_i) &=& \boldsymbol{\Psi} \\
cov(\mathbf{F}_i) &=& \boldsymbol{\Phi} = \left( \begin{array}{c c}
cov(\mathbf{X}_i) & cov(\mathbf{X}_i,\mathbf{Y}_i) \\
cov(\mathbf{Y}_i,\mathbf{X}_i) & cov(\mathbf{Y}_i)
\end{array} \right)
= \left( \begin{array}{c c}
\boldsymbol{\Phi}_{11} & \boldsymbol{\Phi}_{12} \\
\boldsymbol{\Phi}_{12}^\top & \boldsymbol{\Phi}_{22} \\
\end{array} \right) \\
cov(\mathbf{e}_i) &=& \boldsymbol{\Omega} \\
cov(\mathbf{D}_i) &=& \boldsymbol{\Sigma} \\
\end{eqnarray*}
Note that $\boldsymbol{\Phi}_{11} = \boldsymbol{\Phi}_x$.
\end{frame}

\begin{frame}
\frametitle{Example: A Path Model with Measurement Error}
\begin{columns}
\column{0.5\textwidth}
\begin{center}
\includegraphics[width=2.5in]{mepath} % Ugh! Powerpoint 2015. Sorry!
\end{center} \pause
\column{0.5\textwidth}
%{\footnotesize
\begin{eqnarray*}
Y_{i,1} &=& \gamma_1 X_i + \epsilon_{i,1} \\
Y_{i,2} &=& \beta Y_{i,1} + \gamma_2 X_i + \epsilon_{i,2} \\
W_i &=& X_i + e_{i,1} \\
V_{i,1} &=& Y_{i,1} + e_{i,2} \\
V_{i,2} &=& Y_{i,2} + e_{i,3}
\end{eqnarray*}
%} % End size
\end{columns}
\end{frame}

\begin{frame} % A huge slide
\frametitle{Matrix Form}
%\framesubtitle{}
%\begin{center}
\begin{tabular}{ccc}
\includegraphics[width=1in]{mepath} &
\raisebox{.45in}{
\begin{minipage}{1.5in}
{\footnotesize
\begin{eqnarray*}
Y_{i,1} &=& \gamma_1 X_i + \epsilon_{i,1} \\
Y_{i,2} &=& \beta Y_{i,1} + \gamma_2 X_i + \epsilon_{i,2} \\
W_i &=& X_i + e_{i,1} \\
V_{i,1} &=& Y_{i,1} + e_{i,2} \\
V_{i,2} &=& Y_{i,2} + e_{i,3}
\end{eqnarray*}
} % End size
\end{minipage}
} % End raisebox
&
\raisebox{.45in}{
\begin{minipage}{1in}
{\footnotesize
\begin{eqnarray*}
\mathbf{Y}_i &=& \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\
\mathbf{F}_i &=& \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right) \\
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i
\end{eqnarray*}
} % End size
\end{minipage}
} % End raisebox
\end{tabular} \pause
{\small
\begin{displaymath}
\begin{array}{ccccccccc}
\mathbf{Y}_i &=& \boldsymbol{\beta} & \mathbf{Y}_i & + & \boldsymbol{\Gamma} & \mathbf{X}_i & + & \boldsymbol{\epsilon}_i \\
\left( \begin{array}{c} Y_{i,1} \\ Y_{i,2} \end{array} \right) & = &
\left( \begin{array}{c c} 0 & 0 \\ \beta & 0 \\ \end{array} \right) &
\left( \begin{array}{c} Y_{i,1} \\ Y_{i,2} \end{array} \right) & + &
\left( \begin{array}{c} \gamma_1 \\ \gamma_2 \end{array} \right) & X_i & + &
\left( \begin{array}{c} \epsilon_{i,1} \\ \epsilon_{i,2} \end{array} \right)
\end{array}
\end{displaymath} \pause
\begin{displaymath}
\begin{array}{cccccc}
\mathbf{D}_i &=& \boldsymbol{\Lambda} & \mathbf{F}_i &+& \mathbf{e}_i \\
\left( \begin{array}{c} W_i \\ V_{i,1} \\ V_{i,2} \end{array} \right) & = &
\left( \begin{array}{c c c} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{array} \right) &
\left( \begin{array}{c} X_i \\ Y_{i,1} \\ Y_{i,2} \end{array} \right) & + &
\left( \begin{array}{c} e_{i,1} \\ e_{i,2} \\ e_{i,3} \end{array} \right)
\end{array}
\end{displaymath}
% \pause
} % End size
% The rest just will not fit. Make it HW.
% {\scriptsize
% \vspace{2mm}
% $cov(\mathbf{X}_i)=Var(X_i)= \phi$ \pause
% $cov(\boldsymbol{\epsilon}_i)=\boldsymbol{\Psi} =
% \left( \begin{array}{c c}
% \psi_1 & 0 \\
% 0 & \psi_2
% \end{array} \right)$ \pause
% $cov(\mathbf{e}_i)=\boldsymbol{\Omega} =
% \left( \begin{array}{c c c}
% \omega_1 & 0 & 0\\
% 0 & \omega_2 & 0 \\
% 0 & 0 & \omega_3
% \end{array} \right)$
%} % End size
\end{frame} % End of huge slide

\begin{frame}
\frametitle{Observable variables in the ``latent'' variable model $\mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i$}
\framesubtitle{Fairly common} \pause
\begin{itemize}
\item These present no problem. \pause
\item Let $P(e_j=0) = 1$, so $Var(e_j) = 0$. \pause
\item And $Cov(e_i,e_j)=0$. \pause
\item Because if $P(e_j=0) = 1$, \pause
\begin{eqnarray*}
Cov(e_i,e_j) &=& E(e_ie_j) - E(e_i)E(e_j) \\ \pause
&=& E(e_i\cdot 0) - E(e_i)\cdot 0 \\ \pause
&=& 0-0=0 \pause
\end{eqnarray*}
\item In $\boldsymbol{\Omega} = cov(\mathbf{e}_i)$, column $j$ (and row $j$) are all zeros. \pause
\item So $\boldsymbol{\Omega}$ is singular; no problem.
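\pause
\item For example, if $Y_{i,2}$ in the path model were observed directly instead of through $V_{i,2}$, we would set $V_{i,2} = Y_{i,2}$ by letting $P(e_{i,3}=0)=1$, making row 3 and column 3 of $\boldsymbol{\Omega}$ all zeros.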
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{What should you be able to do?} \pause
%\framesubtitle{}
\begin{itemize}
\item Given a path diagram, write the model equations and say which exogenous variables are correlated with each other. \pause
\item Given the model equations and information about which exogenous variables are correlated with each other, draw the path diagram. \pause
\item Given either piece of information, write the model in matrix form and say what all the matrices are. \pause
\item Calculate model covariance matrices. \pause
\item Check identifiability.
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{Recall the notation}
\begin{eqnarray*}
\mathbf{Y}_i &=& \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\
\mathbf{F}_i &=& \left( \begin{array}{c} \mathbf{X}_i \\ \mathbf{Y}_i \end{array} \right) \\
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i \\
&&\\ \pause
cov(\mathbf{X}_i) &=& \boldsymbol{\Phi}_x \\
cov(\boldsymbol{\epsilon}_i) &=& \boldsymbol{\Psi} \\
cov(\mathbf{F}_i) &=& \boldsymbol{\Phi} = \left( \begin{array}{c c}
cov(\mathbf{X}_i) & cov(\mathbf{X}_i,\mathbf{Y}_i) \\
cov(\mathbf{Y}_i,\mathbf{X}_i) & cov(\mathbf{Y}_i)
\end{array} \right)
= \left( \begin{array}{c c}
\boldsymbol{\Phi}_{11} & \boldsymbol{\Phi}_{12} \\
\boldsymbol{\Phi}_{12}^\top & \boldsymbol{\Phi}_{22} \\
\end{array} \right) \\
cov(\mathbf{e}_i) &=& \boldsymbol{\Omega} \\
cov(\mathbf{D}_i) &=& \boldsymbol{\Sigma} \\
\end{eqnarray*} \pause
Calculate a general expression for $\boldsymbol{\Sigma}(\boldsymbol{\theta})$.
\end{frame}

\begin{frame}
\frametitle{For the latent variable model, calculate $\boldsymbol{\Phi} = cov(\mathbf{F}_i)$}
\framesubtitle{Have $cov(\mathbf{X}_i) = \boldsymbol{\Phi}_x$, need $cov(\mathbf{Y}_i)$ and $cov(\mathbf{X}_i,\mathbf{Y}_i)$} \pause
%{\footnotesize
\begin{eqnarray*}
& & \mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
&\Rightarrow& \mathbf{Y}_i - \boldsymbol{\beta} \mathbf{Y}_i = \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
&\Rightarrow& \mathbf{IY}_i - \boldsymbol{\beta} \mathbf{Y}_i = \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
&\Rightarrow& (\mathbf{I} - \boldsymbol{\beta} )\mathbf{Y}_i = \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
& {\color{red} \Rightarrow} & (\mathbf{I} - \boldsymbol{\beta} )^{-1}(\mathbf{I} - \boldsymbol{\beta} )\mathbf{Y}_i = (\mathbf{I} - \boldsymbol{\beta} )^{-1}(\boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i) \\ \pause
&\Rightarrow& \mathbf{Y}_i = (\mathbf{I} - \boldsymbol{\beta} )^{-1}(\boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i)
\end{eqnarray*} \pause
So,
\begin{eqnarray*}
cov(\mathbf{Y}_i) \pause &=& (\mathbf{I} - \boldsymbol{\beta} )^{-1}cov(\boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i) \left((\mathbf{I} - \boldsymbol{\beta} )^{-1}\right)^\top \\ \pause
&=& (\mathbf{I} - \boldsymbol{\beta} )^{-1}\left(cov(\boldsymbol{\Gamma} \mathbf{X}_i) + cov(\boldsymbol{\epsilon}_i)\right) (\mathbf{I} - \boldsymbol{\beta}^\top )^{-1} \\ \pause
&=& (\mathbf{I} - \boldsymbol{\beta} )^{-1} \left(\boldsymbol{\Gamma}\boldsymbol{\Phi}_x\boldsymbol{\Gamma}^\top + \boldsymbol{\Psi} \right) (\mathbf{I} - \boldsymbol{\beta}^\top )^{-1}
\end{eqnarray*} \pause
Similarly, $cov(\mathbf{X}_i,\mathbf{Y}_i) = cov\left(\mathbf{X}_i, \, (\mathbf{I} - \boldsymbol{\beta} )^{-1}(\boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i)\right) = \boldsymbol{\Phi}_x\boldsymbol{\Gamma}^\top(\mathbf{I} - \boldsymbol{\beta}^\top )^{-1}$.
%} % End size
\end{frame}
% That's a lot cleaner than the way I was doing it before.
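\begin{frame}
\frametitle{Checking $cov(\mathbf{Y}_i)$ on the example path model}
\framesubtitle{A sketch, writing $Var(X_i) = \phi$, $Var(\epsilon_{i,1}) = \psi_1$ and $Var(\epsilon_{i,2}) = \psi_2$, with $\boldsymbol{\Psi}$ diagonal}
For the example, $\boldsymbol{\beta} = \left( \begin{array}{c c} 0 & 0 \\ \beta & 0 \end{array} \right)$ and $\boldsymbol{\Gamma} = \left( \begin{array}{c} \gamma_1 \\ \gamma_2 \end{array} \right)$, so that
\begin{displaymath}
(\mathbf{I} - \boldsymbol{\beta} )^{-1} = \left( \begin{array}{c c} 1 & 0 \\ \beta & 1 \end{array} \right)
~~\mbox{ and }~~
\boldsymbol{\Gamma}\phi\boldsymbol{\Gamma}^\top + \boldsymbol{\Psi} =
\left( \begin{array}{c c}
\gamma_1^2\phi + \psi_1 & \gamma_1\gamma_2\phi \\
\gamma_1\gamma_2\phi & \gamma_2^2\phi + \psi_2
\end{array} \right).
\end{displaymath} \pause
The $(1,1)$ element of $(\mathbf{I} - \boldsymbol{\beta} )^{-1} \left(\boldsymbol{\Gamma}\phi\boldsymbol{\Gamma}^\top + \boldsymbol{\Psi} \right) (\mathbf{I} - \boldsymbol{\beta}^\top )^{-1}$ works out to $\gamma_1^2\phi + \psi_1$, \pause which agrees with a direct calculation from $Y_{i,1} = \gamma_1 X_i + \epsilon_{i,1}$:
\begin{displaymath}
Var(Y_{i,1}) = \gamma_1^2 \, Var(X_i) + Var(\epsilon_{i,1}) = \gamma_1^2\phi + \psi_1.
\end{displaymath}
\end{frame}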
\begin{frame}
\frametitle{Does $(\mathbf{I} - \boldsymbol{\beta} )^{-1}$ exist?}
%\framesubtitle{}
\begin{eqnarray*}
&& (\mathbf{I} - \boldsymbol{\beta} )\mathbf{Y}_i = \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
& \Rightarrow & cov\left((\mathbf{I} - \boldsymbol{\beta} )\mathbf{Y}_i\right) = cov\left(\boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i\right) \\
& \Rightarrow & (\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top = \boldsymbol{\Gamma} cov(\mathbf{X}_i)\boldsymbol{\Gamma}^\top + cov(\boldsymbol{\epsilon}_i) \\
& \Rightarrow & (\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top = \boldsymbol{\Gamma\Phi}_x\boldsymbol{\Gamma}^\top + \boldsymbol{\Psi} \\
\end{eqnarray*}
Now let $\mathbf{a} \neq \mathbf{0}$ be a $q \times 1$ constant vector. Then
\begin{eqnarray*}
\mathbf{a}^\top(\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top \mathbf{a} & = & \mathbf{a}^\top\boldsymbol{\Gamma\Phi}_x\boldsymbol{\Gamma}^\top\mathbf{a} + \mathbf{a}^\top\boldsymbol{\Psi}\mathbf{a} \\
& > & 0,
\end{eqnarray*}
because $cov\left(\boldsymbol{\Gamma} \mathbf{X}_i\right) = \boldsymbol{\Gamma\Phi}_x\boldsymbol{\Gamma}^\top$ is non-negative definite and $\boldsymbol{\Psi} = cov(\boldsymbol{\epsilon}_i)$ is positive definite. \pause
Hence, $(\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top$ is positive definite.
\end{frame}

\begin{frame}
\frametitle{Have $(\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top$ positive definite} \pause
%\framesubtitle{}
\begin{itemize}
\item So the $q \times q$ matrix $(\mathbf{I} - \boldsymbol{\beta} ) cov(\mathbf{Y}_i) (\mathbf{I} - \boldsymbol{\beta} )^\top$ is full rank: its rank is $q$. \pause
\item $(\mathbf{I} - \boldsymbol{\beta} )$, $cov(\mathbf{Y}_i)$ and $(\mathbf{I}-\boldsymbol{\beta})^\top$ are all $q \times q$. \pause
\item The rank of a product cannot exceed the minimum of the ranks of the factors, so each factor must have rank $q$. \pause
\item Hence Rank$(\mathbf{I}-\boldsymbol{\beta}) = q$, and the inverse exists. $\blacksquare$
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{$(\mathbf{I} - \boldsymbol{\beta} )^{-1}$ exists if the rest of the model is correct}
%\framesubtitle{}
\begin{itemize}
\item The set of $\boldsymbol{\beta}$ values that make $\mathbf{I} - \boldsymbol{\beta}$ singular is a strange and unexpected hole in the parameter space. \pause
\item No need to ``assume'' the inverse exists, as all the textbooks do. \pause
\item For example, if $\boldsymbol{\beta} = \mathbf{I}$, \pause then
\begin{eqnarray*}
& & \mathbf{Y}_i = \boldsymbol{\beta} \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
& \Rightarrow & \mathbf{Y}_i = \mathbf{Y}_i + \boldsymbol{\Gamma} \mathbf{X}_i + \boldsymbol{\epsilon}_i \\ \pause
& \Rightarrow & \boldsymbol{\epsilon}_i = -\boldsymbol{\Gamma} \mathbf{X}_i \\ \pause
\end{eqnarray*}
This is impossible if $\mathbf{X}_i$ and $\boldsymbol{\epsilon}_i$ are independent with positive definite covariance matrices. \pause
\item Summary: The rest of the model places subtle restrictions on $\boldsymbol{\beta}$. \pause
\end{itemize}
\end{frame}
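\begin{frame}
\frametitle{For the example path model, there is no hole}
\framesubtitle{A sketch: what happens without feedback loops}
\begin{itemize}
\item In the example, $\boldsymbol{\beta} = \left( \begin{array}{c c} 0 & 0 \\ \beta & 0 \end{array} \right)$, so
$\mathbf{I} - \boldsymbol{\beta} = \left( \begin{array}{c c} 1 & 0 \\ -\beta & 1 \end{array} \right)$
has determinant one, and the inverse exists for \emph{every} value of $\beta$. \pause
\item More generally, if the endogenous variables can be ordered so that each one is influenced only by earlier ones (no feedback loops), then $\boldsymbol{\beta}$ is strictly lower triangular, $\mathbf{I} - \boldsymbol{\beta}$ has ones on the main diagonal, and its determinant is one. \pause
\item The subtle restrictions on $\boldsymbol{\beta}$ can matter only when endogenous variables influence one another in a loop.
\end{itemize}
\vspace{3mm} \pause
But we were in the middle of a covariance calculation.
\end{frame}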
\begin{frame}
\frametitle{For the measurement model, calculate $\boldsymbol{\Sigma} = cov(\mathbf{D}_i)$} \pause
%\framesubtitle{}
{\LARGE
\begin{eqnarray*}
\mathbf{D}_i &=& \boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i \\ \pause
\Rightarrow cov(\mathbf{D}_i) &=& cov(\boldsymbol{\Lambda}\mathbf{F}_i + \mathbf{e}_i) \\ \pause
&=& cov(\boldsymbol{\Lambda}\mathbf{F}_i) + cov(\mathbf{e}_i) \\ \pause
&=& \boldsymbol{\Lambda}cov(\mathbf{F}_i)\boldsymbol{\Lambda}^\top + cov(\mathbf{e}_i) \\ \pause
&=& \boldsymbol{\Lambda}\boldsymbol{\Phi}\boldsymbol{\Lambda}^\top + \boldsymbol{\Omega} \\ \pause
&=& \boldsymbol{\Sigma}
\end{eqnarray*}
} % End size
\end{frame}

\begin{frame}
\frametitle{Two-stage Proofs of Identifiability}
\framesubtitle{Stage 1 is the latent variable model and Stage 2 is the measurement model.} \pause
\begin{itemize}
\item Show the parameters of the latent variable model $(\boldsymbol{\beta}, \boldsymbol{\Gamma}, \boldsymbol{\Phi}_x, \boldsymbol{\Psi})$ can be recovered from $\boldsymbol{\Phi} = cov(\mathbf{F}_i)$. \pause
\item Show the parameters of the measurement model $(\boldsymbol{\Lambda},\boldsymbol{\Phi},\boldsymbol{\Omega})$ can be recovered from $\boldsymbol{\Sigma} = cov(\mathbf{D}_i)$. \pause
\item This means all the parameters can be recovered from $\boldsymbol{\Sigma}$. \pause
\item Break a big problem into two smaller ones. \pause
\item Develop \emph{rules} for checking identifiability at each stage. \pause
\item Just look at the path diagram.
\end{itemize}
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}
\frametitle{Copyright Information}

This slide show was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner}, Department of Statistical Sciences, University of Toronto. It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}{Creative Commons Attribution - ShareAlike 3.0 Unported License}. Use any part of it as you like and share the result freely. The \LaTeX~source code is available from the course website:

\vspace{3mm}

\href{http://www.utstat.toronto.edu/~brunner/oldclass/2101f19}{\small\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/2101f19}}

\end{frame}

\end{document}