diff options
| author | Philipp Le <philipp-le-prviat@freenet.de> | 2020-05-09 23:33:47 +0200 |
|---|---|---|
| committer | Philipp Le <philipp-le-prviat@freenet.de> | 2021-03-04 01:16:19 +0100 |
| commit | 6650bc28d949931491770fb5146ad81b1ccac191 (patch) | |
| tree | 73cf86725c6e43a1b2e3b73a2972fbb96f9dd37e /chapter02 | |
| parent | b08c6696547280a87871ac29ed0ff5a849637d0d (diff) | |
| download | dcs-lecture-notes-6650bc28d949931491770fb5146ad81b1ccac191.zip dcs-lecture-notes-6650bc28d949931491770fb5146ad81b1ccac191.tar.gz dcs-lecture-notes-6650bc28d949931491770fb5146ad81b1ccac191.tar.bz2 | |
WIP: Chapter 2 - LTI systems
Diffstat (limited to 'chapter02')
| -rw-r--r-- | chapter02/content_ch02.tex | 544 |
1 files changed, 532 insertions, 12 deletions
diff --git a/chapter02/content_ch02.tex b/chapter02/content_ch02.tex index 03c0841..f192067 100644 --- a/chapter02/content_ch02.tex +++ b/chapter02/content_ch02.tex @@ -630,6 +630,7 @@ Let's investigate the \index{rectangular function} rectangular function from Fig 0 & \qquad \text{if } \; |t| > \frac{1}{2}, \\ 1 & \qquad \text{if } \; |t| < \frac{1}{2} \end{cases} + \label{eq:ch02:rect_function} \end{equation} The function is undefined for $t = \pm \frac{1}{2}$. The function is now transformed, i.e., $\underline{x}(t) = \mathrm{rect}(t)$. @@ -786,6 +787,7 @@ The time domain is obtained by the respective inverse transform. \draw[-latex, thick] (SolFD.west) -- node[midway, above, align=center]{Inverse Transform} (SolTD.east); \end{tikzpicture} \caption{Explanation of the purpose of transforms} + \label{fig:ch02:benefit_of_transforms} \end{figure} \section{Properties of The Continuous Fourier Transform} @@ -918,7 +920,7 @@ The constants can be moved in front of the integrals. \begin{excursus}{Convolution} The convolution is defined to: \begin{equation} - f(t) * g(t) = \left(f * g\right) (t) = \int_{\tau = -\infty}^{\infty} f(\tau) g(t - \tau) \, \mathrm{d} \tau + f(t) * g(t) = \left(f * g\right) (t) = \int_{\tau = -\infty}^{\infty} f(\tau) g(t - \tau) \, \mathrm{d} \tau = \int_{\tau = -\infty}^{t - \infty} f(\tau) g(\tau) \, \mathrm{d} \tau \label{eq:ch02:def_convolution} \end{equation} \end{excursus} @@ -944,21 +946,39 @@ The constants can be moved in front of the integrals. \subsection{Duality} +From \eqref{eq:ch02:def_fourier_transform}, we have: +\begin{equation*} + \underline{X}(j \omega) = \int\limits_{t = -\infty}^{\infty} \underline{x}(t) \cdot e^{-j \omega t} \, \mathrm{d} t +\end{equation*} +Now, swap $t$ and $-\omega$. 
+\begin{equation} + \underline{X}(j t) = - \int\limits_{\omega = \infty}^{-\infty} \underline{x}(-\omega) \cdot e^{j \omega t} \, \mathrm{d} \omega +\end{equation} +Note that: +\begin{itemize} + \item The negative sign in front of the integral comes from $\mathrm{d} t \rightarrow - \mathrm{d} \omega$ + \item The integration limits $\pm \infty$ are reversed in their sign to $\mp \infty$. +\end{itemize} +Now, extend the right side with $\frac{1}{2 \pi} \cdot 2 \pi$. Let the negative sign be consumed by reversing the integration limits. +\begin{equation} + \underline{X}(j t) = \frac{1}{2 \pi} \int\limits_{\omega = -\infty}^{\infty} 2 \pi \underline{x}(-\omega) \cdot e^{j \omega t} \, \mathrm{d} \omega +\end{equation} +This equation resembles \eqref{eq:ch02:def_inv_fourier_transform}. +\begin{equation} + \underline{X}(j t) = \mathcal{F}^{-1} \left\{2 \pi \underline{x}(-\omega)\right\} +\end{equation} + \begin{definition}{Duality} Suppose $\underline{g}(t)$ has a Fourier transform $\underline{G}\left(j \omega\right)$, i.e., $\mathcal{F}\left\{\underline{g}(t)\right\} = \underline{G}\left(j \omega\right)$. The Fourier transform of $\underline{G}(t)$ is: \begin{equation} - \mathcal{F}\left\{\underline{G}(t)\right\} = 2 \pi \cdot \underline{g} \left(- j \omega\right) + \mathcal{F}\left\{\underline{G}(j t)\right\} = 2 \pi \cdot \underline{g} \left(-\omega\right) \label{eq:ch02:op_duality} \end{equation} + + The variables $t$ and $\omega$ are swapped in both the original function and its Fourier transform. \end{definition} -An example for the duality is, the frequency shift. We already know the Fourier transform of the time shift \eqref{eq:ch02:op_time_shift}. 
-\begin{equation} - \mathcal{F}\left\{e^{j \omega_0 t} \underline{f}(t)\right\} = \underbrace{\underline{F} \left(j (\omega - \omega_0)\right)}_{= \mathcal{F}\left\{\underline{f}(t)\right\} \left( j (\omega - \omega_0) \right)} - \label{eq:ch02:op_freq_shift} -\end{equation} - -Another example is the convolution in time-domain. Due to the duality, it becomes a multiplication the frequency domain. +An example for the duality is the convolution in time-domain. Due to the duality, it becomes a multiplication in the frequency domain. \begin{equation} \mathcal{F}\left\{ \underline{f}(t) * \underline{g}(t) \right\} = \mathcal{F}\left\{\underline{f}(t)\right\} \cdot \mathcal{F}\left\{\underline{g}(t)\right\} \label{eq:ch02:op_conv} \end{equation} @@ -1003,7 +1023,7 @@ Another example is the convolution in time-domain. Due to the duality, it become \end{itemize} \end{itemize} -\subsection{Dirac Delta Function} +\subsection{Dirac Delta Function} \label{sec:ch02_dirac_impulse} An important distribution is the \index{Dirac delta function} \textbf{Dirac delta function} $\delta(t)$. The Dirac delta function is zero everywhere except at its origin, where it is an indefinitely narrow, indefinitely high pulse. \begin{equation} @@ -1031,6 +1051,7 @@ A special feature of the function is called \index{Dirac measure} \textbf{Dirac Using the Dirac measure, the Fourier transform can be calculated: \begin{equation} \mathcal{F} \left\{\delta(t)\right\} = \int\limits_{-\infty}^{\infty} \delta(t) \cdot e^{-j \omega t} \; \mathrm{d} t = 1 + \label{eq:ch02:ft_dirac} \end{equation} The Fourier transform of the Dirac delta function is the frequency-independent constant $1$. @@ -1071,13 +1092,512 @@ The Fourier transform of the Dirac delta function is the frequency-independent c \section{\acs{LTI} Systems} +\begin{definition}{System} + A system is an entity or a process which responds to an input signal with an output signal. 
+ + \begin{figure}[H] + \centering + \begin{tikzpicture} + \node[draw, block] (System) {System\\ $\underline{h}(t)$}; + \draw[<-o] (System.west) -- ++(-2cm, 0) node[above, align=center]{Input signal\\ $\underline{x}(t)$}; + \draw[->] (System.east) -- ++(2cm, 0) node[above, align=center]{Output signal\\ $\underline{y}(t)$}; + \end{tikzpicture} + \caption{A system with input and output} + \end{figure} +\end{definition} + +\subsection{Classification of Systems} + +\begin{description} + \item[Deterministic or stochastic] A deterministic system is predictable at any time, whereas a stochastic system is based on a random process. + \item[Causal or non-causal] A system is causal if its output signal $\underline{y}(t_0)$ at the time instance $t_0$ only relies on input signal values of the past, i.e., $\underline{x}(t)$ with $ t \leq t_0$. A system is non-causal if its output signal also depends on future values of the input signal. + \item[Linear or non-linear] A linear system fulfils the \textbf{superposition} principle: $y = S(a x_1 + b x_2) = a S(x_1) + b S(x_2)$. + \item[Time-variant or time-invariant] In a time-invariant system, input signals can be arbitrarily shifted in time, and the system produces an output signal shifted in the same time, i.e., $y(t - \Delta t) = S(x(t - \Delta t))$. In contrast, a system which changes its behaviour over time is time-variant. + \item[Time-discrete or time-continuous] This is analogous to time-discrete and time-continuous signals. + \item[One-dimensional or multidimensional] A one-dimensional system has one pair of input and output signals. A multidimensional system processes a vector $\cmplxvect{x}(t)$ of input signals and produces a vector $\cmplxvect{y}(t)$ of output signals. +\end{description} + +The focus of this lecture is on \acf{LTI} systems. Systems in this chapter are furthermore deterministic. + \subsection{Transfer Function} +Each deterministic system can be analytically described. 
+ +Let's consider an example electrical network with the input voltage $u_i(t)$ and the output voltage $u_o(t)$. +\begin{figure}[H] + \centering + \begin{circuitikz} + \draw (0, 0) to[R, l=$R$, o-] ++(2,0) to[L, l=$L$] ++(2,0) to[short, *-o] ++(2,0); + \draw (4, 0) to[C, l=$C$, -*] ++(0,-2); + \draw (0, -2) to[short, o-o] ++(6,0); + + \draw (0, 0) to[open, v=$u_i(t)$] (0, -2); + \draw (6, 0) to[open, v^=$u_o(t)$] (6, -2); + \end{circuitikz} + \caption{An example system: an electrical network (2nd order low pass)} +\end{figure} +Using Kirchhoff's circuit laws, the following relation can be determined. +\begin{equation} + L C \frac{\mathrm{d}^2 u_o(t)}{\mathrm{d} t^2} + R C \frac{\mathrm{d} u_o(t)}{\mathrm{d} t} + u_o(t) = u_i(t) +\end{equation} + +It is a second order differential equation. The order corresponds to the number of memories in the system. + +Memorizable quantities are for example: +\begin{itemize} + \item Energy (e.g. in the form of electrical charge, voltage or current) + \item Information +\end{itemize} +Examples for memories are: +\begin{itemize} + \item Capacitors (electrical energy) + \item Inductances (magnetic energy) + \item Spring (potential energy) + \item Flip-flop (for digital information) +\end{itemize} + +\textbf{How can the output signal of the network $u_o(t)$ be determined in relation to a known input signal $u_i(t)$?} + +In the time domain, it is hard to solve the differential equation. Remember the explanation of the benefits of transforms (Figure \ref{fig:ch02:benefit_of_transforms}). + +Let's use the Fourier transform to formulate a simpler problem. 
+\begin{equation*} + L C \frac{\mathrm{d}^2 u_o(t)}{\mathrm{d} t^2} + R C \frac{\mathrm{d} u_o(t)}{\mathrm{d} t} + u_o(t) = u_i(t) +\end{equation*} +is transformed to +\begin{equation} + \begin{split} + \left(j \omega\right)^2 L C \underline{U}_o\left(j \omega\right) + \left(j \omega\right) R C \underline{U}_o\left(j \omega\right) + \underline{U}_o\left(j \omega\right) &= \underline{U}_i\left(j \omega\right) \\ + \left(\left(j \omega\right)^2 L C + \left(j \omega\right) R C + 1\right) \underline{U}_o\left(j \omega\right) &= \underline{U}_i\left(j \omega\right) \\ + \left(- \omega^2 L C + j \omega R C + 1\right) \underline{U}_o\left(j \omega\right) &= \underline{U}_i\left(j \omega\right) + \end{split} +\end{equation} + +Now, we can formulate a simple relationship in the frequency domain between the input signal $\underline{U}_i\left(j \omega\right)$ and the output signal $\underline{U}_o\left(j \omega\right)$, which are both now in frequency domain. +\begin{equation} + \underline{U}_o\left(j \omega\right) = \underline{H} \left(j \omega\right) \cdot \underline{U}_i\left(j \omega\right) +\end{equation} + +$\underline{H} \left(j \omega\right)$ is the \index{transfer function} \textbf{transfer function} which fully describes a deterministic \ac{LTI} system. + +In our electric network, the transfer function is: +\begin{equation} + \underline{H} \left(j \omega\right) = \frac{1}{- \omega^2 L C + j \omega R C + 1} +\end{equation} + +In general: +\begin{definition}{Transfer function} + The \index{transfer function} \textbf{transfer function} of a system with the input $\underline{X}\left(j \omega\right) \InversTransformHoriz \underline{x}(t)$ and the output $\underline{Y}\left(j \omega\right) \InversTransformHoriz \underline{y}(t)$ is: + \begin{equation} + \underline{H} \left(j \omega\right) = \frac{\underline{Y}\left(j \omega\right)}{\underline{X}\left(j \omega\right)} + \label{eq:ch02:tranfer_func} + \end{equation} +\end{definition} + +\begin{excursus}{I'm lost! 
I have a non-linear system.} + You are not! Under certain conditions, a non-linear system can be linearized. + + Using the \index{Taylor series} \textbf{Taylor series}, a non-linear function can be split into a series of polynomials. + \begin{equation} + f(t) = \sum_{n=0}^{\infty} \left. \frac{1}{n!} \frac{\mathrm{d}^n f(t)}{\mathrm{d} t^n} \right|_{t = t_0} \left(t - t_0\right)^n + \end{equation} + The Taylor series is developed around a specific \index{operating point} \textbf{operating point} $t_0$. + + Using only the polynomials of orders $n = 0$ and $n = 1$, we obtain a linear equation. This procedure is called \index{Taylor approximation} \textbf{Taylor approximation}. + \begin{equation} + f(t) \approx f(t_0) + \underbrace{\left. \frac{\mathrm{d} f(t)}{\mathrm{d} t} \right|_{t = t_0}}_{= f'(t_0)} \left(t - t_0\right) \qquad \forall \; |t - t_0| \rightarrow 0 + \end{equation} + The Taylor approximation using just the polynomials up to $n = 1$ is called \index{linearization} \textbf{linearization}. The approximation is only valid around the operating point $t_0$, i.e., $t$ is constrained $|t - t_0| < \xi$. The approximation gets more imprecise for greater $\xi$. + + Linearization is a common method in electronic circuit analysis to model semiconductors, which are always non-linear. Semiconductors are based on an $e$-function. +\end{excursus} + + \subsection{Impulse Response} -% Convolution +Let's transfer \eqref{eq:ch02:tranfer_func} back to the time domain. 
+\begin{equation} + \underline{y}(t) = \underline{h}(t) * \underline{x}(t) +\end{equation} +The convolution (see \eqref{eq:ch02:def_convolution}) can be resolved to: +\begin{equation} + \underline{y}(t) = \int_{\tau = -\infty}^{\infty} \underline{h}(\tau) \underline{x}(t - \tau) \, \mathrm{d} \tau = \int_{\tau = -\infty}^{\infty} \underline{h}(t - \tau) \underline{x}(\tau) \, \mathrm{d} \tau +\end{equation} + +Now, the input signal is set to the Dirac delta function $\delta(t)$ (see \eqref{eq:ch02:dirac_delta}). +\begin{equation} + \begin{split} + \underline{y}_\delta(t) &= \underline{h}(t) * \underline{x}(t) \\ + &= \int_{\tau = -\infty}^{\infty} \underline{h}(\tau) \delta(t - \tau) \, \mathrm{d} \tau \\ + &= \underline{h}(t) + \end{split} +\end{equation} + +\begin{itemize} + \item The convolution can be seen as the Dirac pulse shifted across the time axis and then multiplied by $\underline{h}(t)$ and integrated. + \item Using the Dirac measure \eqref{eq:ch02:dirac_measure}, the integral becomes zero for all $\tau \neq t$. + \item The integral becomes $\underline{h}(t)$ for all $\tau = t$. +\end{itemize} + +\begin{definition}{Impulse response} + The inverse Fourier transform of the transfer function $\underline{H} \left(j \omega\right)$ is the \index{impulse response} \textbf{impulse response} $\underline{h}(t)$. + \begin{equation} + \underline{h}(t) = \mathcal{F}^{-1}\left\{\underline{H} \left(j \omega\right)\right\} + \end{equation} +\end{definition} + +The name ``impulse response'' is directly connected to the Dirac delta function. The Dirac delta function can be seen as an ideal impulse (indefinitely narrow width in time, indefinitely high in value). Giving this impulse as an input into a system yields the impulse response. + +\begin{fact} + The impulse response is the time-domain response of a system to an ideal impulse (Dirac delta function). 
+\end{fact} + +So, theoretically, you can obtain the impulse response -- and thereby a complete definition of the system -- by feeding a Dirac delta function into its input. Of course, this is not possible because a Dirac delta function cannot be generated in nature. However, it can be approximated (e.g. ultra-narrow and high-voltage pulse), yielding the impulse response of the ``system under test'' with uncertainties. + +\begin{proof}{} + A proof in the frequency domain is simple. + + Assume that $\underline{x}(t) = \delta(t)$, then $\underline{X}\left(j \omega\right) = 1$ (see \eqref{eq:ch02:ft_dirac}). Using \eqref{eq:ch02:tranfer_func}, + \begin{equation} + \underline{Y}_\delta\left(j \omega\right) = \underline{H}\left(j \omega\right) \cdot \underbrace{\underline{X}\left(j \omega\right)}_{= 1} = \underline{H}\left(j \omega\right) + \end{equation} + + Transform back to the time domain: + \begin{equation} + \underline{y}_\delta(t) = \underline{h}(t) + \end{equation} +\end{proof} + +You have learnt that any deterministic \ac{LTI} system can be expressed by: +\begin{itemize} + \item transfer function + \item impulse response +\end{itemize} + +\subsection{Causality} + +As already described in the introduction to this subsection, a system is causal if it does not rely on future input values. 
-\subsection{Poles and Zeroes} +\begin{definition}{Causality of a system} + The impulse response of a causal system is constrained by: + \begin{equation} + \underline{h}(t) = 0 \qquad \forall \; t < 0 + \end{equation} +\end{definition} + +Let's take the rectangular function \eqref{eq:ch02:rect_function} as an example for the transfer function of a system: +\begin{equation} + \underline{H}\left(j \omega\right) = \mathrm{rect}(\omega) = \begin{cases} + 0 & \qquad \text{if } \; |\omega| > \frac{1}{2}, \\ + 1 & \qquad \text{if } \; |\omega| < \frac{1}{2} + \end{cases} +\end{equation} +The inverse Fourier transform is, due to the duality, a sinc-function in time-domain: +\begin{figure}[H] + \centering + \begin{tikzpicture} + \begin{axis}[ + height={0.25\textheight}, + width=0.6\linewidth, + scale only axis, + xlabel={$t$}, + ylabel={$|\underline{h}(t)|$}, + %grid style={line width=.6pt, color=lightgray}, + %grid=both, + grid=none, + legend pos=north east, + axis y line=middle, + axis x line=middle, + every axis x label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=north, + }, + every axis y label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=east, + }, + xmin=-52, + xmax=52, + ymin=0, + ymax=1.2, + xtick={-50, -40, ..., 50}, + ytick={0, 0.25, ..., 1.0} + ] + \addplot[blue, thick, smooth, domain=-50:50, samples=200] plot (\x,{abs(sinc((1/(2*pi))*\x))}); + \addlegendentry{Non-causal system impulse response}; + \addplot[red, thick, smooth, domain=0:50, samples=200] plot (\x,{0.5*abs(sinc((1/(2*pi))*\x))}); + \addlegendentry{Causal system impulse response}; + \end{axis} + \end{tikzpicture} + \caption{Impulse response of a system with a rectangular function as its transfer function} +\end{figure} +The system's impulse response $\underline{h}(t)$ (blue curve) is also defined in $t < 0$. Therefore, it is non-causal. + +If $\underline{h}(t)$ is constrained to be zero for all $t < 0$ (red curve), it would be causal. 
However, the transfer function $\underline{H}\left(j \omega\right)$ would be different. + +\begin{fact} + Non-causal systems need to reliably predict future values of the input signal. Therefore, it is not possible to implement them in real. +\end{fact} + +\subsection{Zeroes and Poles} + +\begin{excursus}{Relationship between Fourier transform and Laplace transform} + The \index{Laplace transform} Laplace transform is another integral transform, which is closely related to the Fourier transform. The (two-sided) Laplace transform is defined by: + \begin{equation} + \underline{X}(\underline{s}) = \mathcal{L} \left\{\underline{x}(t)\right\} = \int\limits_{t = -\infty}^{\infty} \underline{x}(t) e^{- \underline{s} t} \, \mathrm{d} t + \end{equation} + $\underline{x}(t)$ is the original (time-domain) function. $X(\underline{s})$ is the Laplace transform in frequency domain. $\underline{s}$ is the complex frequency variable, corresponding to $j \omega$ in the Fourier transform. In fact, $\underline{s}$ can be decomposed to: + \begin{equation} + \underline{s} = \sigma + j \omega + \end{equation} + The frequency variable of the Laplace transform is two-dimensional, whereas the frequency variable of the Fourier transform is one-dimensional. + + \begin{figure}[H] + \centering + \begin{tikzpicture} + \draw[->] (-2.2,0) -- (2.2,0) node[below, align=left]{$\Re\left\{\underline{s}\right\}$};
+ \draw[->] (0,-2.2) -- (0,2.2) node[left, align=right]{$\Im\left\{\underline{s}\right\}$}; + \draw[thick, red] (0,-2) -- (0,2); + \draw[dashed, red] (0,1) -- (1,1.2) node[right, align=left, color=red]{$j \omega$}; + \end{tikzpicture} + \caption{Complex plane of the complex frequency variable $\underline{s}$} + \end{figure} + + The Fourier transform is a special case of the Laplace transform, setting $\sigma = 0$. The complex frequency variable becomes $\underline{s} = j \omega$. In the complex plane, the frequency variable $j \omega$ is only the imaginary axis. + + \vspace{0.5em} + + \textbf{Why two transforms?} Actually, the Laplace transform fully describes the system, including transients. Fourier transform neglects the transients due to $\sigma = 0$ and expects a \textbf{steady-state}. + \begin{itemize} + \item The Laplace transform is used for a full analysis of the system, especially transient analysis. Therefore, it is used in control engineering to describe components of a control system and analyse its stability (zeros and poles). + \item The Fourier transform is suitable for steady-state analysis of a system. That means, that any on/off switching process has happened far in the past $t \rightarrow -\infty$. + \end{itemize} + + However, in the literature Fourier transform is inconsistently used to analyse the poles and zeros of a system. In fact, a Laplace transform is behind this. But, that is just a notation issue, because $\omega$ is used instead of $\underline{s}$. + + The Fourier transform is more common in the field of signal processing and information theory, because signals are mostly steady-state and transients are of minor interest. That's why we stick to it. +\end{excursus} + +In system analysis, zeros and poles play an important role. +\begin{itemize} + \item $\underline{s}_0$ is a zero of the system with the transfer function $\underline{H}(\underline{s})$, when $\underline{H}(\underline{s}_0) = 0$. 
+ \item $\underline{s}_\infty$ is a pole of the system with the transfer function $\underline{H}(\underline{s})$, when $\underline{H}(\underline{s}_\infty) \rightarrow \pm \infty$. +\end{itemize} + +$\underline{H}(\underline{s})$ can be written as a fraction with polynomials in both the numerator and denominator. +\begin{equation} + \underline{H}(\underline{s}) = \frac{(\underline{s} - \underline{s}_{0,0}) (\underline{s} - \underline{s}_{0,1}) \ldots (\underline{s} - \underline{s}_{0,p})}{(\underline{s} - \underline{s}_{\infty,0}) (\underline{s} - \underline{s}_{\infty,1}) \ldots (\underline{s} - \underline{s}_{\infty,q})} +\end{equation} +\begin{itemize} + \item Zeros $\underline{s}_0$ are all values for $\underline{s}$ that make the numerator zero. + \item Poles $\underline{s}_\infty$ are all values for $\underline{s}$ that make the denominator zero. + \item There can be multiple zeros and poles with the same value. Counting their number yields their order. +\end{itemize} + +Let's investigate the following example: +\begin{equation} + \underline{H}(\underline{s}) = \frac{\underline{s}^2 - 2 j \underline{s} - 1}{\underline{s}^2 - 4} = \frac{(\underline{s} - j)^2}{(\underline{s} + 2) (\underline{s} - 2)} +\end{equation} +The system has +\begin{itemize} + \item a zero of 2nd order at $\underline{s}_{0,0} = j$, + \item a pole of 1st order at $\underline{s}_{\infty,0} = 2$, and + \item a pole of 1st order at $\underline{s}_{\infty,1} = -2$. 
+\end{itemize} +\begin{figure}[H] + \centering + \begin{tikzpicture} + \begin{axis}[ + height={0.25\textheight}, + width=0.6\linewidth, + scale only axis, + xlabel={$\Re\left\{\underline{s}_\infty\right\} = \sigma$}, + ylabel={$\Im\left\{\underline{s}_\infty\right\} = \omega$}, + %grid style={line width=.6pt, color=lightgray}, + %grid=both, + grid=none, + legend pos=north east, + axis y line=middle, + axis x line=middle, + every axis x label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=north, + }, + every axis y label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=east, + }, + xmin=-3, + xmax=3, + ymin=-3, + ymax=3, + xtick={-2, -1, ..., 2}, + ytick={-2, -1, ..., 2}, + ] + \addplot[red, only marks, mark=o] coordinates {(0, 1)}; + \addlegendentry{Zeros}; + \addplot[red, only marks, mark=x] coordinates {(2, 0) (-2, 0)}; + \addlegendentry{Poles}; + \end{axis} + \end{tikzpicture} + \caption{Zeros and poles of the example system} +\end{figure} + +The zeros and poles are used to analyse the stability of a system. +\begin{itemize} + \item The input signal $|\underline{x}(t)| < \infty \quad \forall \; t \in \mathbb{R}$ is bounded, i.e. not infinite. + \item A stable system always emits an output signal $|\underline{y}(t)| < \infty \quad \forall \; t \in \mathbb{R}$ which is bounded, too. + \item This is called \index{BIBO stability} \textbf{\ac{BIBO} stability}. + \item To achieve \ac{BIBO} stability, all poles must be on the left side of the complex plane excluding the imaginary axis: $\Re\left\{\underline{s}_\infty\right\} \stackrel{!}{<} 0$. +\end{itemize} + +\subsection{Amplitude and Phase Response} + +The complex transfer function of a system can be decomposed to polar coordinates: +\begin{equation} + \underline{H}\left(j \omega\right) = A(\omega) \cdot e^{j \varphi(\omega)} +\end{equation} +Both $A(\omega)$ and $\varphi(\omega)$ are functions of the angular frequency $\omega$. 
+ +Each input signal $\underline{x}(t) \TransformHoriz \underline{X}\left(j \omega\right)$ is subject to +\begin{itemize} + \item a change in amplitude, called \emph{gain} or \index{amplitude response} \textbf{amplitude response} $A(\omega)$: $|\underline{Y}\left(j \omega\right)| = A(\omega) \cdot |\underline{X}\left(j \omega\right)|$ + \item a shift in phase, called \emph{phase shift} or \index{phase response} \textbf{phase response} $\varphi(\omega)$: $\arg\left(\underline{Y}\left(j \omega\right)\right) = \varphi(\omega) + \arg\left(\underline{X}\left(j \omega\right)\right)$ +\end{itemize} + +For each mono-chromatic component of the signal at an angular frequency of $\omega$, the gain and phase shift can be illustrated: +\begin{figure}[H] + \centering + \begin{tikzpicture} + \begin{axis}[ + height={0.25\textheight}, + width=0.6\linewidth, + scale only axis, + xlabel={$t$}, + ylabel={$x(t), y(t)$}, + %grid style={line width=.6pt, color=lightgray}, + %grid=both, + grid=none, + legend pos=north east, + axis y line=middle, + axis x line=middle, + every axis x label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=north, + }, + every axis y label/.style={ + at={(ticklabel* cs:1.05)}, + anchor=east, + }, + xmin=-3, + xmax=3, + ymin=-3, + ymax=3, + xtick={-2, -1, ..., 2}, + ytick={-2, -1, ..., 2}, + ] + \addplot[blue, domain=-2:2, samples=100] plot (\x, {2*sin(360*\x)}); + \addlegendentry{$x(t)$}; + \addplot[red, domain=-2:2, samples=100] plot (\x, {sin(360*\x - 45)}); + \addlegendentry{$y(t)$}; + \end{axis} + \end{tikzpicture} + \caption[Gain and phase shift of a mono-chromatic signal]{Gain and phase shift of a mono-chromatic signal. Here, the gain $A = 0.5$ and the phase shift is $\varphi = \frac{\pi}{4} = \SI{45}{\degree}$.} + \label{fig:ch02:gain_phase_shift} +\end{figure} + +\textit{Remarks:} +\begin{itemize}
+ \item The envelope (shape of the signal) of a multi-frequent signals may however be altered. Each mono-chromatic component is subject to its own change by $\underline{H}\left(j \omega\right)$. +\end{itemize} + +The values of both the amplitude response $A(\omega)$ and the phase response $\varphi(\omega)$ can be plotted over the angular frequency $\omega$. + +\todo{Plots} + +\subsection{Ideal Filters} + +All ideal filters are non-causal and can therefore not be implemented in real. + +\subsubsection{Ideal Low Pass Filter} + +A \index{low pass filter} \textbf{\acf{LPF}} +\begin{itemize} + \item lets pass all signals below a \index{low pass filter!cut-off frequency} \textbf{cut-off frequency} $\omega_o$ (all signals within the \index{low pass filter!pass band} \textbf{pass band} $|\omega| < \omega_o$), + \item blocks all signals above the cut-off frequency $\omega_o$ (all signals within the \index{low pass filter!stopband} \textbf{stopband} $|\omega| > \omega_o$). +\end{itemize} + +\begin{equation} + \underline{H}_{TPF}\left(j \omega\right) = \mathrm{rect}\left(\frac{1}{2} \cdot \frac{\omega}{\omega_o}\right) = \begin{cases} + 0 & \qquad \text{if } \; |\omega| > \omega_o, \\ + 1 & \qquad \text{if } \; |\omega| < \omega_o + \end{cases} +\end{equation} + +\todo{Amplitude response} + +\subsubsection{Ideal High Pass Filter} + +A \index{high pass filter} \textbf{\acf{HPF}} +\begin{itemize} + \item blocks all signals below a \index{high pass filter!cut-off frequency} \textbf{cut-off frequency} $\omega_o$ (all signals within the \index{high pass filter!stopband} \textbf{stopband} $|\omega| < \omega_o$), + \item lets pass all signals above the cut-off frequency $\omega_o$ (all signals within the \index{high pass filter!pass band} \textbf{pass band} $|\omega| > \omega_o$). 
+\end{itemize} + +\begin{equation} + \underline{H}_{HPF}\left(j \omega\right) = 1 - \underbrace{\mathrm{rect}\left(\frac{1}{2} \cdot \frac{\omega}{\omega_o}\right)}_{\text{Equals low pass filter}} = \begin{cases} + 1 & \qquad \text{if } \; |\omega| > \omega_o, \\ + 0 & \qquad \text{if } \; |\omega| < \omega_o + \end{cases} +\end{equation} + +\todo{Amplitude response} + +\subsubsection{Ideal Band Pass Filter} + +A \index{band pass filter} \textbf{\acf{BPF}} +\begin{itemize} + \item lets pass all signals within a \index{band pass filter!pass band} \textbf{pass band} with the \index{band pass filter!bandwidth} \textbf{bandwidth} $\omega_b$ which is centred around the \index{band pass filter!centre frequency} \textbf{centre frequency} $\omega_c$: pass band $||\omega| - \omega_c| < \frac{\omega_b}{2}$ + \item blocks all signals outside the pass band: \index{band pass filter!stopband} \textbf{stopband} is everything outside the pass band +\end{itemize} + +\begin{equation} + \underline{H}_{BPF}\left(j \omega\right) = \underbrace{\mathcal{F}\left\{\cos\left(\omega_c t\right)\right\} * \underbrace{\mathrm{rect}\left(\frac{\omega}{\omega_b}\right)}_{\text{Equals low pass filter}}}_{\text{``Two-sided frequency shift''}} = \begin{cases} + 1 & \qquad \text{if } \; ||\omega| - \omega_c| < \frac{\omega_b}{2}, \\ + 0 & \qquad \text{else} + \end{cases} +\end{equation} + +The \ac{BPF} can be seen as a \ac{LPF} frequency-shifted in both positive and negative direction by the centre frequency $\omega_c$. This special ``two-sided frequency shift'' will later be called \emph{modulation}. 
+ +\todo{Amplitude response} + +\subsubsection{Ideal Band Elimination Filter} + +A \index{band elimination filter} \textbf{\acf{BEF}} +\begin{itemize} + \item blocks all signals within a \index{band elimination filter!stopband} \textbf{stopband} with the \index{band elimination filter!bandwidth} \textbf{bandwidth} $\omega_b$ which is centred around the \index{band elimination filter!centre frequency} \textbf{centre frequency} $\omega_c$: stopband $||\omega| - \omega_c| < \frac{\omega_b}{2}$ + \item lets pass all signals outside the stopband: \index{band elimination filter!pass band} \textbf{pass band} is everything outside the stopband +\end{itemize} + +\begin{equation} + \underline{H}_{BEF}\left(j \omega\right) = 1 - \underbrace{\left(\mathcal{F}\left\{\cos\left(\omega_c t\right)\right\} * \mathrm{rect}\left(\frac{\omega}{\omega_b}\right)\right)}_{\text{Equals band pass filter}} = \begin{cases} + 0 & \qquad \text{if } \; ||\omega| - \omega_c| < \frac{\omega_b}{2}, \\ + 1 & \qquad \text{else} + \end{cases} +\end{equation} + +\todo{Amplitude response} + +\subsection{Realizable Filters} + +Realizable filters +\begin{itemize} + \item are causal, + \item have a real-valued slope between pass band and stopband instead of an ideal cut-off (step). + \item Their phase response $\varphi(\omega)$ is not constantly zero. +\end{itemize} \printbibliography[heading=subbibliography] \end{refsection} |
