\documentclass[12pt]{article}
%\usepackage{amsbsy} % for \boldsymbol and \pmb
\usepackage{graphicx} % To include pdf files!
\usepackage{amsmath}
\usepackage{mathtools} % For symbol under multiple integrals
\usepackage{amsbsy}
\usepackage{amsfonts}
\usepackage{euscript} % for \EuScript
\usepackage[colorlinks=true, pdfstartview=FitV, linkcolor=blue, citecolor=blue, urlcolor=blue]{hyperref} % For links
\usepackage{pdfpages} % To include possibly multi-page pdf documents: \includepdf{NormalTable.pdf}
\oddsidemargin = -0.75in % Played with this
\evensidemargin=0in
\textwidth=6.3in
\topmargin=-0.7in
\headheight=0.1in
\headsep=0.1in
\textheight=9.4in
% \usepackage[margin=0.25in]{geometry}
\pagestyle{empty} % No page numbers

\begin{document}
% \enlargethispage*{1000 pt}
\begin{center}
{\Large \hspace{0.75in} \textbf{STA 256 Formulas}}\\
\vspace{1 mm}
\end{center}

\noindent
\renewcommand{\arraystretch}{2.0}
\begin{tabular}{llll}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Math %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
$ \displaystyle \sum_{k=j}^{\infty} a^k = \frac{a^j}{1-a}$ for $|a|<1$ & $ \displaystyle \sum_{k=0}^{\infty} \frac{x^k}{k!} = e^x$ & ~~~~~ & $\displaystyle (a+b)^n = \sum_{k=0}^n \binom{n}{k} a^k b^{n-k}$ \\
\multicolumn{2}{l} { $ \lim_{x \rightarrow c} \frac{g(x)}{h(x)} = \lim_{x \rightarrow c} \frac{g^\prime(x)}{h^\prime(x)}$ if $\frac{0}{0}$ or $\frac{\infty}{\infty}$ etc.} & & $\displaystyle \lim_{n \rightarrow \infty}\left(1 + \frac{x}{n}\right)^n = e^x$ \\
$\Gamma(\alpha) = \int_0^\infty e^{-t} t^{\alpha-1} \, dt$ & $\Gamma(\alpha+1) = \alpha \, \Gamma(\alpha)$ & ~~~~~ & $\Gamma(\frac{1}{2}) = \sqrt{\pi} $ \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Sets %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Distributive Laws of Sets: & $A \cap \left(\cup_{j=1}^\infty B_j\right) = \cup_{j=1}^\infty (A \cap B_j)$ & ~~~~~ & $A \cup \left(\cap_{j=1}^\infty B_j\right) = \cap_{j=1}^\infty (A \cup B_j)$ \\
De Morgan Laws: & $(\cap_{j=1}^\infty A_j)^c = \cup_{j=1}^\infty A_j^c$ & ~~~~~ & $(\cup_{j=1}^\infty A_j)^c = \cap_{j=1}^\infty A_j^c$ \\
\end{tabular}
\renewcommand{\arraystretch}{1.0}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Foundations %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{2mm}

\noindent \hspace{3mm}Properties of probability: % Use hspace to line up with tabular -- oh well.
\begin{enumerate}
\item $0 \leq P(A) \leq 1$ for any $A \subseteq S$
\item $P(\emptyset) = 0$
\item $P(S)=1$
\item If $A_1, A_2, \ldots$ are disjoint subsets of $S$, $P\left( \cup_{k=1}^\infty A_k \right) = \sum_{k=1}^\infty P(A_k)$.
\item $P(A^c) = 1-P(A)$
\item If $A \subseteq B$ then $P(A) \leq P(B)$
\item $P(A \cup B) = P(A)+P(B)-P(A\cap B)$
\item If $A_1 \subseteq A_2 \subseteq A_3 \subseteq \ldots$ and $A = \cup_{k=1}^\infty A_k$, then $\displaystyle \lim_{k \rightarrow \infty} P(A_k) = P(A)$.
\item If $A_1 \supseteq A_2 \supseteq A_3 \supseteq \ldots$ and $A = \cap_{k=1}^\infty A_k$, then $\displaystyle \lim_{k \rightarrow \infty} P(A_k) = P(A)$.
\end{enumerate}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Counting %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\noindent
\renewcommand{\arraystretch}{1.5}
\begin{tabular}{lll}
$_nP_k = \frac{n!}{(n-k)!} $ & $ \binom{n}{k} = \frac{n!}{k!
\, (n-k)!} $ & $\binom{n}{k_1~\cdots~k_\ell}=\frac{n!}{k_1!~\cdots~k_\ell!}$ \\
%%%%%%%%%%%%% Conditional probability and independence %%%%%%%%%%%%%
$P(B|A) \stackrel{def}{=} \frac{P(A\cap B)}{P(A)}$ & $P(A\cap B) = P(A)P(B|A)$ & $P(B) = \sum_{k=1}^\infty P(B|A_k)P(A_k)$ \\
\multicolumn{2}{l} {$P(A_j|B) = \frac{P(A_j)P(B|A_j)}{\sum_{k=1}^\infty P(A_k)P(B|A_k)}$} & $P(A|B) = \frac{P(A)P(B|A)}{P(A)P(B|A) + P(A^c)P(B|A^c)}$ \\
\multicolumn{2}{l} {$A$ and $B$ independent means $P(A \cap B)=P(A)P(B)$ }& $P(\mbox{$k$ heads}) = \binom{n}{k}\theta^k(1-\theta)^{n-k}$ \\
%%%%%%%%%%%%%% Random Variables %%%%%%%%%%%%%%%%%%%%
$F_{_X}(x) = P(X \leq x)$ & $\displaystyle \lim_{x \rightarrow - \infty} F(x) = 0$ & $\displaystyle \lim_{x \rightarrow \infty} F(x) = 1$ \\
If $X$ is continuous, & $P(X \in A) = \int_A f_{_X}(x) \, dx$ & $\frac{d}{dx} F_{_X}(x) = f_{_X}(x)$ \\
If $X$ is discrete, & $p_{_X}(x) \stackrel{def}{=} P(X=x)$ & \\
$F_{_{X,Y}}(x,y) = P(X \leq x, Y \leq y)$ & $\displaystyle \lim_{x \rightarrow \infty} F_{_{X,Y}}(x,y) = F_{_Y}(y)$ & $\displaystyle \lim_{y \rightarrow \infty} F_{_{X,Y}}(x,y) = F_{_X}(x)$ \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Joint Distributions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If $X$ and $Y$ are discrete, & $p_{_{X,Y}}(x,y) \stackrel{def}{=} P(X=x, Y=y)$ & $p_{_X}(x) = \sum_y p_{_{X,Y}}(x,y)$ \\
If $X$ and $Y$ are continuous, & $P\{(X,Y) \in A\} = \iint\limits_A f_{_{X,Y}}(x,y) \, dx \,dy$ & $f_{_X}(x) = \int_{-\infty}^\infty f_{_{X,Y}}(x,y) \, dy$ \\
& $\frac{\partial^2}{\partial x \partial y} F_{_{X,Y}}(x,y) = f_{_{X,Y}}(x,y)$ & \\
\end{tabular}

% Page break happens naturally here in 12 pt.
\begin{tabular}{lcl}
$p_{_{Y|X}}(y|x) \stackrel{def}{=} \frac{p_{_{X,Y}}(x,y)}{p_{_X}(x)}$ & ~ & $f_{_{Y|X}}(y|x) \stackrel{def}{=} \frac{f_{_{X,Y}}(x,y)} {f_{_X}(x)}$ \\
Independence: $ F_{_{X,Y}}(x,y) = F_{_X}(x)F_{_Y}(y)$ & $\Leftrightarrow$ & $p_{_{X,Y}}(x,y) = p_{_X}(x) \, p_{_Y}(y)$ or $f_{_{X,Y}}(x,y) = f_{_X}(x) \, f_{_Y}(y)$ \\
\multicolumn{3}{l} {Convolution formulas: If $X$ and $Y$ are independent random variables, and $Z = X + Y$ } \\
$ p_{_Z}(z) = \sum_x p_{_X}(x) p_{_Y}(z-x)$ & ~ & $f_{_Z}(z) = \int_{-\infty}^\infty f_{_X}(x) f_{_Y}(z-x) \, dx$ \\
\multicolumn{3}{l} {Jacobian formula: $Y_1 = g_1(X_1,X_2)$ and $Y_2 = g_2(X_1,X_2)$} \\
\multicolumn{3}{l} { \hspace{5mm} $ f_{_{Y_1,Y_2}}(y_1,y_2) = f_{_{X_1,X_2}}(\, x_1(y_1,y_2),x_2(y_1,y_2) \,) \cdot \mbox{abs} \renewcommand{\arraystretch}{1.5} \left| \begin{array}{cc} \frac{\partial x_1}{\partial y_1} & \frac{\partial x_1}{\partial y_2} \\ \frac{\partial x_2}{\partial y_1} & \frac{\partial x_2}{\partial y_2} \end{array}\right|$} \vspace{2mm} \\
\multicolumn{3}{l} { \hspace{5mm} $ f_{_{Y_1,Y_2}}(y_1,y_2) = f_{_{X_1,X_2}}(\, x_1(y_1,y_2),x_2(y_1,y_2) \,) \cdot \mbox{abs} \left(\frac{\partial x_1}{\partial y_1} \frac{\partial x_2}{\partial y_2} - \frac{\partial x_1}{\partial y_2} \frac{\partial x_2}{\partial y_1}\right)$ } \\
\multicolumn{3}{l} {Change to polar coordinates: $dx \, dy = r \, dr \, d\theta$} \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%% Expected value, variance and covariance %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
$E(X) \stackrel{def}{=} \sum_x x \, p_{_X}(x)$ or $\int_{-\infty}^\infty x \, f_{_X}(x) \, dx$ & ~ & $E(g(X)) = \sum_x g(x) \, p_{_X}(x)$ or $\int_{-\infty}^\infty g(x) \, f_{_X}(x) \, dx$ \\
$E\left(\sum_{i=1}^n a_iX_i \right) = \sum_{i=1}^n a_iE(X_i)$ & ~ & $E(X) = E(E[X|Y])$ \\
$Var(X) \stackrel{def}{=} E\left( (X-\mu)^2 \right)$ & ~ & $Var(X) = E(X^2)-[E(X)]^2$ \\
$Var(a+bX) = b^2Var(X)$ & ~ & $Var(aX+bY) =
a^2Var(X)+b^2Var(Y)+2abCov(X,Y)$ \\
$Cov(X,Y) \stackrel{def}{=} E[(X-\mu_{_X})(Y-\mu_{_Y})]$ & ~ & $Cov(X,Y) = E(XY) - E(X)E(Y)$ \\
$Cov(a+bX,c+dY) = bd \, Cov(X,Y)$ & ~ & $Cov(X,aY+bZ) = a \, Cov(X,Y) + b \, Cov(X,Z)$ \\
\multicolumn{3}{l} {$Var\left(\sum_{i=1}^n a_iX_i \right) = \sum_{i=1}^n a_i^2Var(X_i) \, + \, \sum\sum_{i \neq j} a_ia_j Cov(X_i,X_j)$ } \\
\emph{Markov's inequality} & ~ & \emph{Chebyshev's inequality} \\
If $P(Y \geq 0)=1$, then $E(Y) \geq a \, P(Y \geq a)$ & ~ & $P(|X-\mu| \geq k\sigma) \leq \frac{1}{k^2}$ \\
$M_{_X}(t) \stackrel{def}{=} E(e^{Xt})$ & ~ & $M_{_X}^{(k)}(0) = E(X^k)$ \\
$M_{_{aX}}(t) = M_{_X}(at)$ & ~ & $M_{_{\sum X_i}}(t) = \prod_{i=1}^n M_{_{X_i}}(t)$ if the $X_i$ are independent. \\
%%%%%%%%%%%%%%%%%%%%%%%%%%%% Limits %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\multicolumn{3}{l} {\emph{Convergence in probability}: } \\
\multicolumn{3}{l} {$T_n \stackrel{p}{\rightarrow} c$ means for all $\epsilon>0$, $\displaystyle \lim_{n \rightarrow \infty}P\{|T_n-c|\geq\epsilon\} = 0 \Leftrightarrow \lim_{n \rightarrow \infty}P\{|T_n-c| < \epsilon\} = 1$ } \\
\multicolumn{3}{l} {Variance rule: If $\displaystyle \lim_{n \rightarrow \infty}E(T_n) = c$ and $\displaystyle \lim_{n \rightarrow \infty}Var(T_n) = 0$, then $T_n \stackrel{p}{\rightarrow} c$. } \\
\multicolumn{3}{l} {Law of Large Numbers: $\overline{X}_n \stackrel{p}{\rightarrow} \mu = E(X_i)$. } \\
\multicolumn{3}{l} {Continuous mapping: If $T_n \stackrel{p}{\rightarrow} c$ and $g(x)$ is continuous at $x=c$, then $g(T_n) \stackrel{p}{\rightarrow} g(c)$. } \\
\multicolumn{3}{l} {\emph{Convergence in Distribution}: } \\
\multicolumn{3}{l} {$X_n \stackrel{d}{\rightarrow} X$ means $\displaystyle \lim_{n \rightarrow \infty}F_{_{X_n}}(x) = F_{_X}(x)$ at every point where $F_{_X}(x)$ is continuous.} \\
\multicolumn{3}{l} {Central Limit Theorem: $Z_n = \frac{\sqrt{n}(\overline{X}_n-\mu)}{\sigma} \stackrel{d}{\rightarrow} Z \sim$ Normal (0,1).} \\
$T_n \stackrel{d}{\rightarrow} c \Leftrightarrow T_n \stackrel{p}{\rightarrow} c$. & & \\
\end{tabular}
\renewcommand{\arraystretch}{1.0}
% \vspace{5mm}
% _{_X}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Distributions
{\small
\renewcommand{\arraystretch}{1.6}
\noindent
\begin{tabular}{|l|l|l|c|c|} \hline
\textbf{Distribution} & \textbf{Density or probability mass function} & \textbf{MGF} $M_{_X}(t)$ & $E(X)$ & $Var(X)$ \\ \hline
%%%%%%%%%%%%%% Discrete Distributions %%%%%%%%%%%%%%
Bernoulli ($\theta$) & $p_{_X}(x) = \theta^x(1-\theta)^{1-x}$ for $x=0,1$ & $\theta e^t + 1-\theta $ & $\theta$ & $\theta(1-\theta)$ \\ \hline
Binomial ($n,\theta$) & $p_{_X}(x) = \binom{n}{x}\theta^x(1-\theta)^{n-x}$ for $x = 0, \ldots, n$ & $(\theta e^t + 1-\theta )^n$ & $n\theta$ & $n\theta(1-\theta)$ \\ \hline
Geometric ($\theta$) & $p_{_X}(x) = (1-\theta)^x\,\theta$ for $x = 0, 1, 2, \ldots$ & $\theta\left( 1 - (1-\theta)e^t \right)^{-1}$ & $\frac{1-\theta}{\theta}$ & $\frac{1-\theta}{\theta^2}$ \\ \hline
Negative Binomial ($r,\theta$) & $p_{_X}(x) = \binom{x+r-1}{x} \theta^r \, (1-\theta)^x$ for $x = 0, 1, \ldots $ & ~~~~~ & & \\ \hline
%Number of tails before $r$th head && \\ \hline
Hypergeometric ($N,M,n$) & $p_{_X}(x) = \frac{ \binom{M}{x}\binom{N-M}{n-x} } {\binom{N}{n}}$, where $\binom{a}{b}$ must make sense.
& ~~~~~ & & \\ \hline % $E(X) = \frac{nM}{N}$
%$N$ balls, $M$ white, choose $n$ && \\
Poisson ($\lambda$) & $p_{_X}(x) = \frac{e^{-\lambda}\, \lambda^x}{x!}$ for $x = 0, 1, \ldots $ & $e^{\lambda(e^t-1)}$ & $\lambda$ & $\lambda$ \\ \hline
Multinomial ($n,\theta_1, \ldots, \theta_r$) & $p_{_\mathbf{X}}(x_1, \ldots, x_r) = \binom{n}{x_1 \cdots x_r} \theta_1^{x_1} \cdots \theta_r^{x_r}$ & ~~~~~ & & \\ \hline
%%%%%%%%%%%%%% Continuous Distributions %%%%%%%%%%%%%%
Uniform $(L,R)$ & $f_{_X}(x) = \frac{1}{R-L}$ for $L \leq x \leq R$ & $\frac{e^{Rt}-e^{Lt}}{t(R-L)}$ for $t\neq 0$ & $\frac{L+R}{2}$ & $\frac{(R-L)^2}{12}$ \\ \hline
Exponential ($\lambda$) & $f_{_X}(x) = \lambda e^{-\lambda x}$ for $x \geq 0$ & $(1-\frac{t}{\lambda})^{-1}$ & $\frac{1}{\lambda}$ & $\frac{1}{\lambda^2}$\\ \hline
Gamma ($\alpha,\lambda$) & $f_{_X}(x) = \frac{\lambda^\alpha}{\Gamma(\alpha)} e^{-\lambda x} \, x^{\alpha-1}$ for $x \geq 0$ & $(1-\frac{t}{\lambda})^{-\alpha}$ & $\frac{\alpha}{\lambda}$ & $\frac{\alpha}{\lambda^2}$ \\ \hline
Normal ($\mu,\sigma^2$) & $f_{_X}(x) = \frac{1}{\sigma \sqrt{2\pi}}\exp\left\{-\frac{(x-\mu)^2}{2\sigma^2}\right\}$ & $e^{\mu t+\frac{1}{2}\sigma^2t^2}$ & $\mu$ & $\sigma^2$ \\ \hline
Beta ($\alpha,\beta$) & $f_{_X}(x) = \frac{\Gamma(\alpha+\beta)}{\Gamma(\alpha)\Gamma(\beta)} \, x^{\alpha-1} (1-x)^{\beta-1}$ for $0 \leq x \leq 1$ & ~~~~~ & $\frac{\alpha}{\alpha+\beta}$ & \\ \hline
% Variance of beta won't fit: $\frac{\alpha\beta}{(\alpha+\beta)^2(\alpha+\beta+1)}$
\end{tabular}
\renewcommand{\arraystretch}{1.0}

\noindent If $X \sim $ Exponential ($\lambda$), $F_{_X}(x) = 1-e^{-\lambda x}$ for $x \geq 0$. \hspace{5mm}
If $X \sim $ Normal ($\mu,\sigma^2$), $\frac{X-\mu}{\sigma} \sim$ Normal(0,1).

\vspace{90mm}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{3mm} \hrule \vspace{3mm}

\noindent This formula sheet was prepared by \href{http://www.utstat.toronto.edu/~brunner}{Jerry Brunner},
Department of Mathematical and Computational Sciences, University of Toronto Mississauga.
It is licensed under a \href{http://creativecommons.org/licenses/by-sa/3.0/deed.en_US}
{Creative Commons Attribution - ShareAlike 3.0 Unported License}.
Use any part of it as you like and share the result freely.
The \LaTeX~source code is available from the course website:
\begin{center}
\href{http://www.utstat.toronto.edu/~brunner/oldclass/256f19}
{\texttt{http://www.utstat.toronto.edu/$^\sim$brunner/oldclass/256f19}}
\end{center}
} % End size

% \pagebreak
\includepdf{NormalTable.pdf}

\end{document}

% See 3c from 2018

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%