summaryrefslogtreecommitdiff
path: root/chapter03/content_ch03.tex
diff options
context:
space:
mode:
Diffstat (limited to 'chapter03/content_ch03.tex')
-rw-r--r--chapter03/content_ch03.tex78
1 files changed, 72 insertions, 6 deletions
diff --git a/chapter03/content_ch03.tex b/chapter03/content_ch03.tex
index a9673c5..f869fc1 100644
--- a/chapter03/content_ch03.tex
+++ b/chapter03/content_ch03.tex
@@ -12,7 +12,7 @@
\subsection{Statistic Mean}
-Given is family of curves $\vect{x}(t) = \left\{x_1(t), x_2(t), \dots, x_n(t)\right\}$:
+Given is a family of curves $\vect{x}(t) = \left\{x_1(t), x_2(t), \dots, x_n(t)\right\}$. $\vect{x}(t)$ is called a \index{random vector} \textbf{random vector}.
\begin{figure}[H]
\centering
@@ -64,14 +64,14 @@ Given is family of curves $\vect{x}(t) = \left\{x_1(t), x_2(t), \dots, x_n(t)\ri
\begin{itemize}
\item The curves are produced by a random process $\vect{x}(t)$. The random process is time-dependent.
\item All curves consist of random values, which are gathered around a mean value $\E\left\{\vect{x}(t)\right\}$.
- \item The random process can emit any value $x$. However, each value $x$ has a certain probability $p(x, t)$. Again, the probability is time-dependent like the stochastic process.
+ \item The random process can emit any value $x$. However, each value $x$ has a certain likelihood $p(x, t)$ of being produced. Again, this likelihood is time-dependent like the stochastic process.
\end{itemize}
Let's assume that the values are normally distributed. The \index{probability density function} \textbf{\ac{PDF}} $p(x, t)$ of a \index{normal distribution} \textbf{normal distribution} is:
\begin{equation}
p(x, t) = \frac{1}{\sigma(t) \sqrt{2 \pi}} e^{-\frac{1}{2} \left(\frac{x - \mu(t)}{\sigma(t)}\right)^2}
\end{equation}
-$p(x, t)$ is the probability that the stochastic process emits the value $x$ at time instance $t$. Both the mean of the normal distribution $\mu(t)$ and the standard deviation of the normal distribution $\sigma(t)$ are time-dependent.
+$p(x, t)$ is the likelihood that the stochastic process emits the value $x$ at time instance $t$. Both the mean of the normal distribution $\mu(t)$ and the standard deviation of the normal distribution $\sigma(t)$ are time-dependent.
\begin{attention}
Do not confuse the mean of the normal distribution $\mu$ and the mean of a series of samples $\E\left\{\cdot\right\}$ (expectation value)!
@@ -114,7 +114,7 @@ $p(x, t)$ is the probability that the stochastic process emits the value $x$ at
\addplot[black, very thick, dashed] coordinates {(1.47,0) (1.47,1)};
\end{axis}
\end{tikzpicture}
- \caption{Probability for an output value of a stochastic process at time $t_0$ with $\mu(t_0) = 1.47$ and $\sigma(t_0) = 0.5$}
+ \caption{Probability density function for an output value of a stochastic process at time $t_0$ with $\mu(t_0) = 1.47$ and $\sigma(t_0) = 0.5$}
\end{figure}
Given that
@@ -262,7 +262,7 @@ The \index{quadratic temporal mean} \textbf{quadratic temporal mean}:
As a consequence:
\begin{itemize}
	\item One single, sufficiently long, random sample of the process is enough to deduce the statistical properties of an ergodic process.
- \item The ergodic process is in steady state, i.e., it does not erratically change its behaviour and properties.
+ \item The ergodic process is in steady state (\ac{WSS}), i.e., it does not erratically change its behaviour and properties.
\end{itemize}
\begin{figure}[H]
@@ -314,11 +314,77 @@ As a consequence:
\subsection{Cross-Correlation}
+\begin{itemize}
+ \item Imagine you have two random processes.
+ \item They produce the (complex) random vectors $\cmplxvect{x}(t)$ and $\cmplxvect{y}(t)$.
+ \item The random processes can be somehow related (correlated) to each other. But they can also be independent instead.
+ \item How can we find this out?
+\end{itemize}
+
+We need a similarity measure. The cross-correlation is such a measure.
+
+\begin{definition}{Cross-correlation of stochastic processes}
+	The \index{cross-correlation!stochastic process} \textbf{cross-correlation of two stochastic processes} $\cmplxvect{x}(t_1)$ and $\cmplxvect{y}(t_2)$ between the times $t_1$ and $t_2$ is:
+ \begin{equation}
+ \mathrm{R}_{XY}(t_1, t_2) = \E\left\{ \cmplxvect{x}(t_1) \cmplxvect{y}^{*}(t_2) \right\}
+ \end{equation}%
+ \nomenclature[Sr]{$\mathrm{R}_{XY}$}{Cross-correlation of two random vectors}%
+ \nomenclature[Na]{$\left(\cdot\right)^{*}$}{Complex conjugate of $\left(\cdot\right)$}
+ where $\left(\cdot\right)^{*}$ denotes the complex conjugate.
+\end{definition}
+
+The expectation value can be expressed as:
+\begin{equation}
+	\mathrm{R}_{XY}(t_1, t_2) = \E\left\{ \cmplxvect{x}(t_1) \cmplxvect{y}^{*}(t_2) \right\} = \int\limits_{y = -\infty}^{\infty} \int\limits_{x = -\infty}^{\infty} x y \cdot p(x, y, t_1, t_2) \; \mathrm{d} x \mathrm{d} y
+\end{equation}
+$p(x, y, t_1, t_2)$ is the joint \ac{PDF} of the two random processes. It defines the likelihood that $x$ is produced at time $t_1$ \textbf{and} $y$ is produced at time $t_2$.
+
+Let's derive a special case for \textbf{ergodic} processes:
+\begin{itemize}
+ \item The time difference is $\tau = t_2 - t_1$.
+ \item Because of the ergodicity of the two processes, only one sample of each $x_i(t)$ and $y_i(t)$ needs to be taken.
+	\item An estimation for the cross-correlation is averaging the products of the time-shifted samples $x_i(t) \cdot y_i(t+\tau)$. This resembles a temporal mean of the product.
+\end{itemize}
+Extending this to complex numbers yields:
+\begin{equation}
+	\mathrm{R}_{XY}(\tau) = \E\left\{ \cmplxvect{x}(t) \cmplxvect{y}^{*}(t+\tau) \right\} \approx \lim\limits_{T \rightarrow \infty} \frac{1}{T} \int\limits_{t = -\frac{T}{2}}^{\frac{T}{2}} \underline{x}_i^{*}(t) \cdot \underline{y}_i(t+\tau) \; \mathrm{d} t
+\end{equation}
+
+This resembles the cross-correlation of deterministic signals
+\begin{definition}{Cross-correlation of deterministic signals}
+	The \index{cross-correlation!deterministic signals} \textbf{cross-correlation of two deterministic signals} $\underline{f}(t_1)$ and $\underline{g}(t_2)$ with time shift $\tau = t_2 - t_1$ is:
+ \begin{equation}
+ \left(f \star g\right)(\tau) = \int\limits_{t = -\infty}^{\infty} \underline{f}^{*}(t) \cdot \underline{g}(t+\tau) \; \mathrm{d} t
+ \end{equation}%
+	\nomenclature[N]{$\left(f \star g\right)(\tau)$}{Cross-correlation of two signals}
+\end{definition}
+
+\begin{attention}
+ You must not confuse the operators for the convolution $*$ and correlation $\star$.
+\end{attention}
+
+For the random signals $x(t)$ and $y(t)$, the cross-correlation cannot be determined analytically; it can only be estimated numerically.
+\begin{equation}
+ \mathrm{R}_{XY}(\tau) \approx \left(x \star y\right)(\tau)
+\end{equation}
+
+\paragraph{What's the use?}
+
+\begin{itemize}
+ \item The cross-correlation ``scans'' the two signals for common features.
+ \item The cross-correlation $\mathrm{R}_{XY}(\tau)$ will show a peak at the time shift $\tau$, if
+ \begin{itemize}
+ \item The signals are correlated, i.e., have a common feature.
+ \item The common feature is time-shifted by $\tau$.
+ \end{itemize}
+	\item A cross-correlation that stays flat and near $0$ means that the signals are uncorrelated.
+\end{itemize}
+
\section{Spectral Density}
\subsection{Autocorrelation}
-\subsection{Energy and Power Spectral Density}
+\subsection{Power Spectral Density}
\subsection{Decibel}