\chapter{Time-Continuous Signals and Systems}
\begin{refsection}
All signals considered in this chapter are \index{signal!deterministic signal} \textbf{deterministic}, i.e., their values are predictable at any time. In particular, the values can be calculated by a mathematical equation. In contrast, \emph{random} signals are not predictable. Their values are subject to a random process, which must be modelled stochastically.
\index{signal!time-continuous}
\begin{figure}[H]
\centering
\begin{tikzpicture}
\draw node[block](Signals){\textbf{Signal}\\ \textbf{(deterministic)}};
\draw node[block, below left=of Signals](Periodic){Periodic};
\draw node[block, below right=of Signals](NonPeriodic){Non-periodic};
\draw node[block, below left=of Periodic](Mono){Mono-chromatic};
\draw node[block, below right=of Periodic](Multi){Multi-frequent};
\draw [-latex] (Signals) -- (Periodic);
\draw [-latex] (Signals) -- (NonPeriodic);
\draw [-latex] (Periodic) -- (Mono);
\draw [-latex] (Periodic) -- (Multi);
\end{tikzpicture}
\caption{Classification of time-continuous signals}
\label{fig:ch02:timecont_signals_classif}
\end{figure}
\section{Mono-Chromatic Signals}
\paragraph{Representation by a Real-Valued Function.}
The mono-chromatic signal $x_{mc}(t)$ is defined by:
\begin{equation}
x_{mc}(t) = \hat{X} \cdot \cos\left(\omega_0 t - \varphi_0\right)
\label{eq:ch02:mono_chrom_eq}
\end{equation}
where
\begin{tabular}{ll}
$\hat{X}$ & is the \index{amplitude} \textbf{amplitude} of the signal, \\
$\omega_0$ & is the \index{angular frequency} \textbf{angular frequency} of the signal, \\
$\varphi_0$ & is the \index{phase} \textbf{phase} of the signal, \\
$t \in \mathbb{R}$ & is the real-valued, continuously defined time variable.
\end{tabular}
In fact, the sine function $\sin()$ is mono-chromatic, too. It can be derived from \eqref{eq:ch02:mono_chrom_eq} with $\varphi_0 = \SI{90}{\degree}$:
\begin{equation*}
x_{sin}(t) = \hat{X} \cdot \sin\left(\omega_0 t\right) = \hat{X} \cdot \cos\left(\omega_0 t - \SI{90}{\degree}\right)
\end{equation*}
The angular frequency is related to the \index{frequency} \textbf{frequency} $f_0$ by:
\begin{equation}
\omega_0 = 2 \pi f_0
\end{equation}
\begin{attention}
You must not confuse the terms \emph{frequency} and \emph{angular frequency}!
\end{attention}
The inverse of the frequency is the \index{period} \textbf{period} $T_0$. It is the time interval at which the signal repeats.
\begin{equation}
T_0 = \frac{1}{f_0} = \frac{2 \pi}{\omega_0}
\end{equation}
Be aware of the units: the period $T_0$ is given in seconds (\si{s}), and the frequency $f_0$ in the inverse unit, Hertz (\si{Hz}). The angular frequency $\omega_0$ also has the unit of inverse seconds; however, it is never stated in Hertz, but in \si{rad/s} or, more commonly, \si{1/s}.
\begin{table}[H]
\centering
\caption{Units}
\begin{tabular}{|l|l|}
\hline
Period $T_0$ & \si{s} \\
\hline
Frequency $f_0$ & \si{Hz} \\
\hline
Angular frequency $\omega_0$ & \si{1/s} \; (never Hertz!) \\
\hline
\end{tabular}
\end{table}
The actual unit of the signal is derived from its amplitude $\hat{X}$ which can be any physical measure.
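The relations above can be tried out numerically. The following Python/NumPy sketch is illustrative only and not part of the lecture material; all parameter values are arbitrary example assumptions. It generates two periods of a mono-chromatic signal according to \eqref{eq:ch02:mono_chrom_eq}:
\begin{verbatim}
import numpy as np

X_hat  = 2.0                     # amplitude (e.g. in volts)
f0     = 50.0                    # frequency in Hz
omega0 = 2 * np.pi * f0          # angular frequency in 1/s
phi0   = np.pi / 4               # phase in rad
T0     = 1.0 / f0                # period in s

t    = np.linspace(0.0, 2 * T0, 1000)        # two periods of the time axis
x_mc = X_hat * np.cos(omega0 * t - phi0)     # mono-chromatic signal
\end{verbatim}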
\paragraph{Representation by a Complex-Valued Phasor.}
A graphical view of the creation of a cosine signal is depicted in Figure \ref{fig:ch02:cos_creation}.
\begin{figure}[H]
\caption{Imagine a pointer (red) with one end fixed to a point. It rotates counter-clockwise with the angular frequency $\omega_0$ (blue). The tip of the pointer traces a circle (left side). Each angle of the pointer corresponds to a time instance (green). The blue pointer marks the current position at time instance $t$. Its vertical value is projected into the time plot, forming the cosine wave (orange).}
\label{fig:ch02:cos_creation}
\end{figure}
You may note the following relations:
\begin{itemize}
\item A full rotation of the pointer takes exactly one period $T_0$.
\item The orange cosine curve can be shifted horizontally by redefining the initial angle of the pointer at $t = 0$. This offset angle is the phase $\varphi_0$.
\item The length of the pointer and the radius of the circle is the amplitude $\hat{X}$.
\end{itemize}
A mono-chromatic signal can be described by its three parameters
\begin{itemize}
\item Amplitude $\hat{X}$
\item Phase $\varphi_0$
\item Angular frequency $\omega_0$
\end{itemize}
When a signal passes through an \ac{LTI} system, the amplitude, the phase, or both may change. The frequency, however, never changes. Thus, the angular frequency $\omega_0$ is assumed to be constant and is omitted. Consequently, the parameters
\begin{itemize}
\item amplitude $\hat{X}$ and
\item phase $\varphi_0$
\end{itemize}
remain. Both are absorbed by the complex-valued \index{phasor} \textbf{phasor} $\underline{X}$, which uniquely describes a mono-chromatic signal.
\begin{equation}
\underline{X} = \hat{X} \cdot e^{-j \varphi_0} = \hat{X} \angle -\varphi_0
\end{equation}
\begin{excursus}{Complex numbers}
$j$ is the \index{imaginary unit} \textbf{imaginary unit}. It satisfies the equation
\begin{equation}
j^2 = -1
\end{equation}
There is no real number which satisfies this equation, i.e., $j \notin \mathbb{R}$. Instead, $j$ spans the set of complex numbers $\mathbb{C}$.
In mathematics, the imaginary unit is denoted by $i$. In engineering contexts, $j$ is used instead, because $i$ is the symbol of the electric current.
A complex number $\underline{c} \in \mathbb{C}$ can be noted in \index{cartesian form} \textbf{cartesian form}:
\begin{equation}
\underline{c} = a + j b
\end{equation}
$a \in \mathbb{R}$ is the \index{real part} \textbf{real part} of $\underline{c}$. $b \in \mathbb{R}$ is the \index{imaginary part} \textbf{imaginary part} of $\underline{c}$.
\begin{subequations}
\begin{align}
a &= \Re\{\underline{c}\} \\
b &= \Im\{\underline{c}\}
\end{align}
\end{subequations}
In this lecture, complex numbers $\underline{c}$ always carry an underline to distinguish them from real numbers. However, this notation is not mandatory.
Another notation is the \index{polar form} \textbf{polar form}:
\begin{equation}
\underline{c} = r \cdot e^{j \varphi}
\end{equation}
with
\begin{subequations}
\begin{align}
r &= |\underline{c}| = \sqrt{\Re\{\underline{c}\}^2 + \Im\{\underline{c}\}^2} \\
\varphi &= \mathrm{atan2} \left(\Im\{\underline{c}\}, \Re\{\underline{c}\}\right) \\
e^{j \varphi} &= \cos \varphi + j \sin \varphi
\end{align}
\end{subequations}
The polar form can be written in \index{angle notation} \textbf{angle notation}:
\begin{equation}
\underline{c} = r \angle \varphi
\end{equation}
$r \in \mathbb{R}$ and $\varphi \in \mathbb{R}$ are the \index{polar coordinates} \textbf{polar coordinates}.
\end{excursus}
The phasor $\underline{X} \in \mathbb{C}$ is a complex number, which is mostly represented in polar coordinates (see Figure \ref{fig:ch02:cmplxplane_phasor}).
\begin{figure}[H]
\centering
\begin{tikzpicture}
\draw[->] (-3.2,0) -- (3.2,0) node[below, align=left]{$\Re$};
\draw[->] (0,-3.2) -- (0,3.2) node[left, align=right]{$\Im$};
\draw[->, thick] (0, 0) -- (-40:3) node[right, align=left]{Complex phasor $\underline{X}$\\ (position at $t = 0$)};
\draw (0:1.5) arc(0:-40:1.5) node[midway, right, align=left]{Phase $\varphi_0$};
\draw[->, dashed] (-50:1) arc(-50:30:1) node[right, align=left]{$\omega_0$};
\end{tikzpicture}
\caption{Phasor in the complex plane}
\label{fig:ch02:cmplxplane_phasor}
\end{figure}
Figure \ref{fig:ch02:cmplxplane_phasor} depicts the phasor in the complex plane. Figure \ref{fig:ch02:cos_creation} shows a complex plane, too. Please note that both complex planes are rotated by \SI{90}{\degree} with respect to each other.
\begin{fact}
The phasor of a signal is a signal parameter, constant and \underline{not} time-dependent.
\end{fact}
The current position of the pointer $\underline{x}(t)$ in the complex plane is obtained by rotating it. It makes a full rotation every period $T_0$; hence, it rotates with the angular frequency $\omega_0$. In the complex plane, the rotation is a multiplication by $e^{j \omega_0 t}$. $\underline{x}(t) \in \mathbb{C}$ is a complex value, too.
\begin{equation}
\underline{x_{mc}}(t) = \underline{X} \cdot e^{j \omega_0 t} = \hat{X} \cdot e^{-j \varphi_0} \cdot e^{j \omega_0 t}
\end{equation}
\todo{Proof}
The real-valued function can be obtained by extracting the real part of the complex-valued current value.
\begin{equation}
x_{mc}(t) = \Re\left\{\underline{x_{mc}}(t)\right\}
\end{equation}
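A minimal numerical sketch (Python/NumPy; the parameter values are arbitrary example assumptions) confirms that projecting the rotating phasor onto the real axis reproduces the real-valued cosine signal:
\begin{verbatim}
import numpy as np

X_hat, f0, phi0 = 2.0, 50.0, np.pi / 4       # arbitrary example parameters
omega0 = 2 * np.pi * f0
X = X_hat * np.exp(-1j * phi0)               # constant complex phasor

t       = np.linspace(0.0, 2 / f0, 1000)
x_cmplx = X * np.exp(1j * omega0 * t)        # rotating pointer
x_real  = np.real(x_cmplx)                   # projection onto the real axis

# identical to the real-valued definition (up to numerical precision)
assert np.allclose(x_real, X_hat * np.cos(omega0 * t - phi0))
\end{verbatim}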
\section{Periodic Signals and Fourier Series}
Periodic signals $x_p(t)$ form the class of signals which repeat indefinitely at constant time intervals $T_0$.
\begin{equation}
x_p(t + n T_0) = x_p(t) \qquad \forall \; n \in \mathbb{Z}, \quad \mathbb{Z} = \left\{..., -2, -1, 0, 1, 2, ...\right\}
\end{equation}
Mono-chromatic signals are a special kind of periodic signal. Multi-frequent signals are composed of a finite or infinite number of mono-chromatic signals which superimpose. In general, multi-frequent signals are periodic, too.
\begin{fact}
Each periodic signal can be decomposed into a superposition of mono-chromatic signals.
\end{fact}
The inverse of the period $T_0$ is the \textbf{base frequency} $f_0$. This is the frequency at which the periodic pattern repeats. Again, the frequency and the angular frequency $\omega_0 = 2 \pi f_0$ must be distinguished.
The periodic signal can now be decomposed into cosine and sine functions with integer multiples of the base frequency $f_0$ or base angular frequency $\omega_0$, respectively. They are called \index{harmonics} \textbf{harmonics}.
\begin{equation}
\begin{split}
x_p(t) &= \sum\limits_{n=0}^{\infty} a_n \cos\left(n \omega_0 t\right) + \sum\limits_{m=0}^{\infty} b_m \sin\left(m \omega_0 t\right) \qquad \forall \; n, m \in \mathbb{N} = \left\{0, 1, 2, ...\right\} \\
&= a_0 + \sum\limits_{n=1}^{\infty} a_n \cos\left(n \omega_0 t\right) + \sum\limits_{m=1}^{\infty} b_m \sin\left(m \omega_0 t\right) \\
\end{split}
\label{eq:ch02:fourier_series}
\end{equation}
What happened to $n = 0$ and $m = 0$ in the second line? Since $\cos(0) = 1$, the term for $n = 0$ reduces to the constant $a_0$; since $\sin(0) = 0$, the term for $m = 0$ vanishes.
Compared to the mono-chromatic signal, what happened to the phase $\varphi_0$? The phase is a characteristic of mono-chromatic signals. It is completely absorbed by the coefficients $a_n$ and $b_m$ of the cosine and sine functions.
\subsection{Orthogonality}
\index{orthogonality}
The cosine and sine functions are orthogonal to each other. In geometry, two vectors $\vect{A}$ and $\vect{B}$ are said to be orthogonal if the angle between them is \SI{90}{\degree}. In this case, their inner product is zero.
\begin{equation}
\langle \vect{A}, \vect{B} \rangle = 0
\end{equation}
More generally, two functions $f(x)$ and $g(x)$ are orthogonal if their \index{inner product} \textbf{inner product} $\langle f, g \rangle$ is zero.
\begin{equation}
0 \stackrel{!}{=} \langle f, g \rangle_w = \int\limits_{a}^{b} f(x) g(x) w(x) \, \mathrm{d} x
\end{equation}
$w(x)$ is a non-negative weight function, which is $w(x) = 1$ in simple cases like this one.
Now, you can prove that the cosine and sine functions are orthogonal to each other.
\begin{equation}
\int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \cos\left(n \omega_0 t\right) \sin\left(m \omega_0 t\right) \, \mathrm{d} t = 0 \qquad \forall \; n, m \in \mathbb{Z}
\label{eq:ch02:orth_rel_cos_sin}
\end{equation}
Furthermore, cosine functions with \underline{different} indices are orthogonal to each other, and likewise sine functions with different indices.
\begin{equation}
\int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \cos\left(n \omega_0 t\right) \cos\left(p \omega_0 t\right) \, \mathrm{d} t = \frac{\pi}{\omega_0} \cdot \delta_{np} \qquad \forall \; n, p \in \mathbb{N}, \; n + p > 0
\label{eq:ch02:orth_rel_cos}
\end{equation}
\begin{equation}
\int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \sin\left(m \omega_0 t\right) \sin\left(q \omega_0 t\right) \, \mathrm{d} t = \frac{\pi}{\omega_0} \cdot \delta_{mq} \qquad \forall \; m, q \in \mathbb{N}, \; m + q > 0
\label{eq:ch02:orth_rel_sin}
\end{equation}
with the Kronecker delta
\begin{equation}
\delta_{uv} = \begin{cases}
1 & \qquad \text{if } u = v, \\
0 & \qquad \text{if } u \neq v
\end{cases}
\label{eq:ch02:kronecker_delta}
\end{equation}
The \index{orthogonality relations} \textbf{orthogonality relations} \eqref{eq:ch02:orth_rel_cos_sin}, \eqref{eq:ch02:orth_rel_cos} and \eqref{eq:ch02:orth_rel_sin} point out:
\begin{itemize}
\item Cosine functions are orthogonal if their indices are different. I.e., $n \neq p$ in \eqref{eq:ch02:orth_rel_cos}.
\item Sine functions are orthogonal if their indices are different. I.e., $m \neq q$ in \eqref{eq:ch02:orth_rel_sin}.
\item Cosine and sine function are orthogonal independent of their indices.
\item The indices correspond to integer multiples of the base angular frequency $\omega_0$ (harmonics).
\item The case $n = p = 0$ is excluded in \eqref{eq:ch02:orth_rel_cos}: there, the integral equals the full period $T_0 = \frac{2 \pi}{\omega_0}$. This special case is treated separately when extracting the \ac{DC} coefficient below.
\end{itemize}
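The orthogonality relations can be checked numerically. The following sketch (Python/NumPy; grid resolution and indices are arbitrary assumptions, not part of the lecture material) approximates the inner products by the trapezoidal rule:
\begin{verbatim}
import numpy as np

T0     = 1.0
omega0 = 2 * np.pi / T0
t      = np.linspace(-T0 / 2, T0 / 2, 200001)

def inner(f, g):
    # numerical inner product over one period (weight w(t) = 1)
    return np.trapz(f * g, t)

n, m, p = 3, 2, 3
print(inner(np.cos(n * omega0 * t), np.sin(m * omega0 * t)))  # ~ 0
print(inner(np.cos(n * omega0 * t), np.cos(m * omega0 * t)))  # ~ 0 (n != m)
print(inner(np.cos(n * omega0 * t), np.cos(p * omega0 * t)))  # ~ pi/omega0 (n == p)
\end{verbatim}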
\subsection{Extraction of The Coefficients}
The orthogonality relations are useful to extract the coefficients $a_n$ and $b_m$ in \eqref{eq:ch02:fourier_series}. Given is the input signal $\tilde{x}_p(t)$ whose coefficients shall be determined. The following assumptions can be derived from the properties of a periodic signal:
\begin{itemize}
\item $\tilde{x}_p(t)$ is composed of mono-chromatic cosine and sine functions.
\item All cosine and sine functions have integer multiples of the base frequency.
\item Each cosine and sine function has a different weight -- the coefficient.
\end{itemize}
Using the orthogonality relations, the coefficients $\tilde{a}_n$ and $\tilde{b}_n$ can be obtained by:
\begin{subequations}
\begin{align}
\tilde{a}_n &= \frac{\omega_0}{\pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \tilde{x}_p(t) \cdot \cos\left(n \omega_0 t\right) \, \mathrm{d} t \label{eq_ch02_fourier_series_coeff_an} \\
\tilde{b}_m &= \frac{\omega_0}{\pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \tilde{x}_p(t) \cdot \sin\left(m \omega_0 t\right) \, \mathrm{d} t \label{eq_ch02_fourier_series_coeff_bm}
\end{align}
\end{subequations}
\begin{proof}{Parameter Extraction for $\tilde{a}_n$}
Given is a periodic function $\tilde{x}_p(t)$, which can be decomposed into:
\begin{equation}
\tilde{x}_p(t) = \sum\limits_{p=0}^{\infty} \tilde{a}_p \cos\left(p \omega_0 t\right) + \sum\limits_{q=0}^{\infty} \tilde{b}_q \sin\left(q \omega_0 t\right)
\label{eq_ch02_proof_per_sig_example}
\end{equation}
The coefficient $\tilde{a}_n$ is of interest.
Inserting \eqref{eq_ch02_proof_per_sig_example} into \eqref{eq_ch02_fourier_series_coeff_an} yields
\begin{equation}
\tilde{a}_n = \frac{\omega_0}{\pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \left(\sum\limits_{p=0}^{\infty} \tilde{a}_p \cos\left(p \omega_0 t\right) + \sum\limits_{q=0}^{\infty} \tilde{b}_q \sin\left(q \omega_0 t\right)\right) \cdot \cos\left(n \omega_0 t\right) \, \mathrm{d} t
\end{equation}
Due to the orthogonality relations, \underline{all products containing a sine function} and \underline{all products containing a cosine function with index $p \neq n$} become zero. Only the term with $p = n$ remains:
\begin{equation}
\tilde{a}_n = \tilde{a}_p \frac{\omega_0}{\pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \cos\left(p \omega_0 t\right) \cdot \cos\left(n \omega_0 t\right) \, \mathrm{d} t \qquad \text{if } \; n = p
\end{equation}
Using \eqref{eq:ch02:orth_rel_cos}, the integral resolves to:
\begin{equation}
\tilde{a}_n = \tilde{a}_p \frac{\omega_0}{\pi} \frac{\pi}{\omega_0} \qquad \text{if } \; n = p
\end{equation}
In the end, it is proven that $\tilde{a}_n = \tilde{a}_p$ for $n = p$, i.e., \eqref{eq_ch02_fourier_series_coeff_an} indeed extracts the desired coefficient.
The proof is analogous for the coefficients $\tilde{b}_m$.
\end{proof}
$\cos\left(n \omega_0 t\right)$ can be seen as a ``test function'', which is used to extract the component with the index $n$. The proof points out:
\begin{itemize}
\item All sine components are erased by $\cos\left(n \omega_0 t\right)$, due to the orthogonality relations.
\item All cosine functions with index $p \neq n$ are erased by $\cos\left(n \omega_0 t\right)$, due to the orthogonality relations.
\end{itemize}
For $b_m$, the test function $\sin\left(m \omega_0 t\right)$ works analogously.
\begin{excursus}{Illustration of The ``Test Function''}
For an illustration of the ``test functions'', imagine you have a radio and want to listen to a specific station. You tune to the frequency on which the station is broadcasting. All other signals are filtered out; you do not want to hear them. Admittedly, the radio does not employ orthogonality in this case. However, this picture might help to understand the meaning of $\cos\left(n \omega_0 t\right)$ and $\sin\left(m \omega_0 t\right)$ \underline{in connection} with the orthogonality relations.
\end{excursus}
A special case is the coefficient $\tilde{a}_0$:
\begin{equation}
\tilde{a}_0 = \frac{\omega_0}{2 \pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \tilde{x}_p(t) \, \mathrm{d} t = \frac{1}{T_0} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \tilde{x}_p(t) \, \mathrm{d} t
\end{equation}
The prefactor is $\frac{\omega_0}{2 \pi}$ instead of $\frac{\omega_0}{\pi}$, because for $n = p = 0$ the integral over $\cos^2\left(0 \cdot \omega_0 t\right) = 1$ equals the full period $T_0$; \eqref{eq_ch02_fourier_series_coeff_an} therefore applies to $n \geq 1$ only. Since $\cos\left(n \omega_0 t\right) = 1$ for $n = 0$, $\tilde{a}_0$ is the \index{DC offset} \textbf{\ac{DC} offset} of the signal. The above formula is the well-known calculation of the signal mean in electrical engineering.
\begin{definition}{Fourier series}
The composition of a series of mono-chromatic signals as shown in \eqref{eq:ch02:fourier_series} is called \index{Fourier series} \textbf{Fourier series}.
\begin{equation*}
x_p(t) = a_0 + \sum\limits_{n=1}^{\infty} a_n \cos\left(n \omega_0 t\right) + \sum\limits_{m=1}^{\infty} b_m \sin\left(m \omega_0 t\right)
\end{equation*}
The coefficients can be calculated using \eqref{eq_ch02_fourier_series_coeff_an} and \eqref{eq_ch02_fourier_series_coeff_bm}.
\end{definition}
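As a numerical illustration (Python/NumPy; the square-wave test signal and all values are example assumptions, not part of the lecture material), the coefficient formulas can be evaluated by numerical integration and the signal reconstructed from a partial sum:
\begin{verbatim}
import numpy as np

T0     = 1.0
omega0 = 2 * np.pi / T0
t      = np.linspace(-T0 / 2, T0 / 2, 200001)
x      = np.sign(np.sin(omega0 * t))     # odd square wave with amplitude 1

def a(n):   # coefficient a_n for n >= 1 (prefactor omega0/pi = 2/T0)
    return (omega0 / np.pi) * np.trapz(x * np.cos(n * omega0 * t), t)

def b(m):   # coefficient b_m
    return (omega0 / np.pi) * np.trapz(x * np.sin(m * omega0 * t), t)

# odd square wave: a_n ~ 0, b_m ~ 4/(pi*m) for odd m and ~ 0 for even m
print([round(b(m), 4) for m in range(1, 6)])

# reconstruction from the first 20 harmonics (partial Fourier sum)
x_rec = sum(b(m) * np.sin(m * omega0 * t) for m in range(1, 21))
\end{verbatim}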
\subsection{Complex-Valued Fourier Series}
A complex-valued, periodic signal $\underline{x_p}(t)$ can be decomposed into complex-valued mono-chromatic signals. The coefficients $\underline{c}_n$ are phasors.
\begin{equation}
\underline{x_p}(t) = \sum\limits_{n = -\infty}^{\infty} \underline{c}_n \cdot e^{j n \omega_0 t} \qquad \forall \; n \in \mathbb{Z}
\label{eq:ch02:fourier_series_cmplx}
\end{equation}
The coefficients $\underline{\tilde{c}}_n$ of an input signal $\underline{\tilde{x}_p}(t)$ can be determined by:
\begin{equation}
\underline{\tilde{c}}_n = \frac{\omega_0}{2 \pi} \int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} \underline{\tilde{x}_p}(t) \cdot e^{-j n \omega_0 t} \, \mathrm{d} t
\label{eq_ch02_fourier_series_coeff_cn}
\end{equation}
It is based on the orthogonality relation:
\begin{equation}
\int\limits_{-\frac{T_0}{2}}^{\frac{T_0}{2}} e^{j n \omega_0 t} e^{-j p \omega_0 t} \, \mathrm{d} t = \frac{2 \pi}{\omega_0} \cdot \delta_{np} \qquad \forall \; n, p \in \mathbb{Z}
\label{eq:ch02:orth_rel_exp}
\end{equation}
\begin{definition}{Complex-Valued Fourier series}
A complex-valued, periodic signal $\underline{x_p}(t)$ can be decomposed into a series of complex-valued mono-chromatic signals \eqref{eq:ch02:fourier_series_cmplx} -- the \index{Fourier series!complex-valued} \textbf{complex-valued Fourier series}.
\begin{equation*}
\underline{x_p}(t) = \sum\limits_{n = -\infty}^{\infty} \underline{c}_n \cdot e^{j n \omega_0 t} \qquad \forall \; n \in \mathbb{Z}
\end{equation*}
The coefficients can be calculated using \eqref{eq_ch02_fourier_series_coeff_cn}.
\end{definition}
\subsection{Amplitude and Phase Spectra}
Let's consider the complex-valued Fourier series $\underline{x_p}(t)$ \eqref{eq:ch02:fourier_series_cmplx}. The coefficients $\underline{c}_n$ are phasors. Their absolute values (amplitudes) $|\underline{c}_n|$ and arguments (phases) $\arg\left(\underline{c}_n\right)$ can now be plotted over the index $n$. The index $n \in \mathbb{Z}$ is discrete. Thus, the resulting plots are discrete along the dimension of $n$. In contrast, the amplitudes and phases themselves are value-continuous.
\begin{definition}{Spectrum of a periodic signal}
\begin{itemize}
\item The plot of the amplitude $|\underline{c}_n|$ is called \index{amplitude spectrum} \textbf{amplitude spectrum}.
\item The plot of the phase $\arg\left(\underline{c}_n\right)$ is called \index{phase spectrum} \textbf{phase spectrum}.
\item When referring to the \index{spectrum} \textbf{spectrum}, generally both amplitude and phase, or equivalently the complex-valued coefficients $\underline{c}_n$ themselves, are meant.
\end{itemize}
\end{definition}
\begin{fact}
The index $n \in \mathbb{Z}$ is discrete. Hence, the plots of the spectrum are discrete along the dimension of $n$.
\end{fact}
When considering a complex-valued signal $\underline{x_p}(t)$, both amplitude and phase can take arbitrary values, subject to the following constraints:
\begin{itemize}
\item The amplitude $|\underline{c}_n|$ is always a non-negative real number.
\item The phase $\arg\left(\underline{c}_n\right)$ is a real number from the interval $[-\pi, +\pi]$.
\end{itemize}
If the signal $\underline{x_p}(t) = x_p(t)$ is real-valued, i.e., $\Im\left\{\underline{x_p}(t)\right\} = 0$, the values of $\underline{c}_n$ are further constrained by the \index{spectrum!symmetry rules} \textbf{symmetry rules}:
\begin{itemize}
\item The coefficients $\underline{c}_n \in \mathbb{C}$ are still complex-valued phasors.
\item However, the coefficients $\underline{c}_n$ show a special symmetry: $\underline{c}_{-n} = \underline{c}_n^*$.
\begin{itemize}
\item The amplitude spectrum $|\underline{c}_n|$ is an \underline{even function}. It is symmetric with respect to the $y$-axis.
\item The phase spectrum $\arg\left(\underline{c}_n\right)$ is an \underline{odd function}. It is symmetric with respect to the origin.
\item As a consequence, the phase $\arg\left(\underline{c}_0\right)$ at $n = 0$ must be either $0$ or $\pm \pi$. Note that $+\pi$ is identical to $-\pi$ in the complex plane. The phase carries the sign of the \ac{DC} bias: $\arg\left(\underline{c}_0\right) = 0$ means positive \ac{DC} bias and $\arg\left(\underline{c}_0\right) = \pi$ means negative \ac{DC} bias.
\end{itemize}
\end{itemize}
These symmetry rules apply for \underline{all} real-valued signals $\underline{x_p}(t) = x_p(t) \in \mathbb{R}$. The symmetry rules ensure that the mono-chromatic components of the Fourier series \eqref{eq:ch02:fourier_series_cmplx} sum up to a real value at each time instance $t \in \mathbb{R}$.
The symmetry rules do \underline{not} apply for complex-valued signals $\underline{x_p}(t) \in \mathbb{C}$.
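A short numerical check (Python/NumPy; the real-valued test signal is an arbitrary example assumption) illustrates the symmetry rules: the coefficients of a real-valued signal fulfil $\underline{c}_{-n} = \underline{c}_n^*$, so the amplitude spectrum is even and the phase spectrum is odd.
\begin{verbatim}
import numpy as np

T0     = 1.0
omega0 = 2 * np.pi / T0
t      = np.linspace(-T0 / 2, T0 / 2, 200001)
x      = 1.1 + 1.6 * np.cos(omega0 * t - 0.5) + 0.4 * np.cos(2 * omega0 * t)

def c(n):   # complex Fourier coefficient c_n by numerical integration
    return (omega0 / (2 * np.pi)) * np.trapz(x * np.exp(-1j * n * omega0 * t), t)

for n in (1, 2):
    print(abs(c(n)), abs(c(-n)))              # equal:      even amplitude spectrum
    print(np.angle(c(n)), np.angle(c(-n)))    # sign flips: odd phase spectrum
\end{verbatim}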
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
height={0.25\textheight},
width=0.6\linewidth,
scale only axis,
xlabel={$n$},
ylabel={$|\underline{c}_n|$},
%grid style={line width=.6pt, color=lightgray},
%grid=both,
grid=none,
axis lines=left,
legend pos=north east,
xmin=-4,
xmax=4,
ymin=0,
ymax=3,
xtick={-3, -2, ..., 3},
ytick={0, 0.5, ..., 2.5},
axis y line=middle,
axis x line=middle,
every axis x label/.style={
at={(ticklabel* cs:1.05)},
anchor=north,
},
every axis y label/.style={
at={(ticklabel* cs:1.05)},
anchor=east,
}
]
\addplot[red, thick] coordinates {(-3, 0) (-3, 2.0)};
\addplot[red, thick] coordinates {(-2, 0) (-2, 0.4)};
\addplot[red, thick] coordinates {(-1, 0) (-1, 1.6)};
\addplot[red, thick] coordinates {(0, 0) (0, 1.1)};
\addplot[red, thick] coordinates {(1, 0) (1, 1.6)};
\addplot[red, thick] coordinates {(2, 0) (2, 0.4)};
\addplot[red, thick] coordinates {(3, 0) (3, 2.0)};
\addplot[only marks, red, thick, mark=o] coordinates {(-3, 2.0) (-2, 0.4) (-1, 1.6) (0, 1.1) (1, 1.6) (2, 0.4) (3, 2.0)};
\end{axis}
\end{tikzpicture}
\caption[Amplitude spectrum of a multi-frequent signal]{Amplitude spectrum of a multi-frequent signal. The absolute values (amplitudes) of the coefficients are plotted. The underlying signal is real-valued ($\Im\left\{\underline{x_p}(t)\right\} = 0$). This leads to a symmetry with respect to the $y$-axis: the amplitude spectrum of a real-valued signal is an even function.}
\label{fig:ch02:FSeries_Amplitude_Spectrum}
\end{figure}
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
height={0.25\textheight},
width=0.6\linewidth,
scale only axis,
xlabel={$n$},
ylabel={$\arg\left(\underline{c}_n\right)$},
%grid style={line width=.6pt, color=lightgray},
%grid=both,
grid=none,
axis lines=left,
legend pos=north east,
xmin=-4,
xmax=4,
ymin=-4,
ymax=4,
xtick={-3, -2, ..., 3},
ytick={-3.14159, -1.5708, 1.5708, 3.14159},
yticklabels={$-\pi\hspace{0.30cm}$, $-\frac{\pi}{2}$,
$\frac{\pi}{2}$, $\pi\hspace{0.10cm}$},
axis y line=middle,
axis x line=middle,
every axis x label/.style={
at={(ticklabel* cs:1.05)},
anchor=north,
},
every axis y label/.style={
at={(ticklabel* cs:1.05)},
anchor=east,
}
]
\addplot[red, thick] coordinates {(-3, 0) (-3, 3.14159)};
\addplot[red, thick] coordinates {(-2, 0) (-2, -0.5)};
\addplot[red, thick] coordinates {(-1, 0) (-1, 1.6)};
%\addplot[red, thick] coordinates {(0, 0) (0, 0)};
\addplot[red, thick] coordinates {(1, 0) (1, -1.6)};
\addplot[red, thick] coordinates {(2, 0) (2, 0.5)};
\addplot[red, thick] coordinates {(3, 0) (3, -3.14159)};
\addplot[only marks, red, thick, mark=o] coordinates {(-3, 3.14159) (-2, -0.5) (-1, 1.6) (0, 0.0) (1, -1.6) (2, 0.5) (3, -3.14159)};
\addplot[only marks, blue, mark=x] coordinates {(0, -3.14159) (0, 0.0) (0, 3.14159)};
\end{axis}
\end{tikzpicture}
\caption[Phase spectrum of a multi-frequent signal]{Phase spectrum of a multi-frequent signal. The arguments (phases) of the coefficients are plotted. The underlying signal is real-valued ($\Im\left\{\underline{x_p}(t)\right\} = 0$). This leads to a symmetry with respect to the origin: the phase spectrum of a real-valued signal is an odd function. The blue crosses mark the possible phase values of the coefficient $\underline{c}_0$ of a real-valued signal.}
\label{fig:ch02:FSeries_Phase_Spectrum}
\end{figure}
\begin{excursus}{Spectra in nature}
The spectrum is not an abstract, purely mathematical concept. You can see spectra with your own eyes:
\begin{figure}[H]
\centering
\includegraphics[scale=1]{../chapter02/Rainbow.jpg}
\caption[A rainbow showing the spectrum of sunlight]{A rainbow showing the spectrum of sunlight: The white sunlight is composed of mono-chromatic, electromagnetic waves of all frequencies which are optically visible to humans. When light passes through a dispersive medium (glass prism, raindrop, etc.), it is refracted. The refractive index of the medium is different for each mono-chromatic component. The light components are thereby separated by their frequency and become individually visible. One example is the rainbow depicted above. \licensequote{\cite{Arz2007}}{``Arz''}{\href{https://creativecommons.org/licenses/by-sa/3.0/deed.en}{CC-BY-SA 3.0}}}
\end{figure}
The rainbow is a natural example of a visible spectrum of sunlight.
\end{excursus}
\section{Non-Periodic Signals and The Continuous Fourier Transform}
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
height={0.25\textheight},
width=0.6\linewidth,
scale only axis,
xlabel={$t$},
ylabel={$x_{np}(t)$},
%grid style={line width=.6pt, color=lightgray},
%grid=both,
grid=none,
legend pos=north east,
axis y line=middle,
axis x line=middle,
every axis x label/.style={
at={(ticklabel* cs:1.05)},
anchor=north,
},
every axis y label/.style={
at={(ticklabel* cs:1.05)},
anchor=east,
},
xmin=-3,
xmax=3,
ymin=0,
ymax=2,
xtick={-2, -0.5, ..., 2},
ytick={0, 0.5, ..., 1.5}
]
\addplot[blue, thick] coordinates {(-2, 0) (-0.5, 0)};
\addplot[blue, dashed] coordinates {(-0.5, 0) (-0.5, 1)};
\addplot[blue, thick] coordinates {(-0.5, 1) (0.5, 1)};
\addplot[blue, dashed] coordinates {(0.5, 1) (0.5, 0)};
\addplot[blue, thick] coordinates {(0.5, 0) (2, 0)};
\end{axis}
\end{tikzpicture}
\caption{The rectangular function $\mathrm{rect}$ as an example of a non-periodic signal}
\label{fig:ch02:rect_function}
\end{figure}
\index{rectangular function}
\subsection{Derivation of The Continuous Fourier Transform}
Non-periodic signals have no repeating pattern. Consequently, there is no finite period $T_0$. Mathematically, the period tends to infinity, $T_0 \rightarrow \infty$.
A non-periodic signal $\underline{x_{np}}(t)$ can therefore not simply be decomposed into a Fourier series \eqref{eq:ch02:fourier_series_cmplx} with finite $T_0$. Instead, the limit $T_0 \rightarrow \infty$ is considered:
\begin{equation}
\begin{split}
\underline{x_{np}}(t) &= \lim\limits_{T_0 \rightarrow \infty} \sum\limits_{n = -\infty}^{\infty} \underline{c}_n \cdot e^{j n \omega_0 t} \\
&= \lim\limits_{T_0 \rightarrow \infty} \sum\limits_{n = -\infty}^{\infty} \underline{c}_n \cdot e^{j \frac{2 \pi n}{T_0} t}
\end{split}
\label{eq:ch02:sig_np_fourier_series}
\end{equation}
The coefficient $\underline{c}_n$ is defined by \eqref{eq_ch02_fourier_series_coeff_cn}:
\begin{equation}
\begin{split}
\underline{c}_n &= \frac{\omega_0}{2 \pi} \int\limits_{t = -\frac{T_0}{2}}^{\frac{T_0}{2}} \underline{x_{np}}(t) \cdot e^{-j n \omega_0 t} \, \mathrm{d} t \\
&= \frac{1}{T_0} \int\limits_{t = -\frac{T_0}{2}}^{\frac{T_0}{2}} \underline{x_{np}}(t) \cdot e^{-j n \omega_0 t} \, \mathrm{d} t
\end{split}
\label{eq:ch02:sig_np_cn}
\end{equation}
For the case $T_0 \rightarrow \infty$, $n \omega_0$ is substituted by the continuous frequency variable $\omega$:
\begin{equation}
\omega = n \omega_0
\label{eq:ch02:omega_subst}
\end{equation}
Inserting \eqref{eq:ch02:sig_np_cn} into \eqref{eq:ch02:sig_np_fourier_series} while considering \eqref{eq:ch02:omega_subst} yields:
\begin{equation}
\underline{x_{np}}(t) = \lim\limits_{T_0 \rightarrow \infty} \sum\limits_{n = -\infty}^{\infty} \frac{1}{T_0} \left( \int\limits_{t' = -\frac{T_0}{2}}^{\frac{T_0}{2}} \underline{x_{np}}(t') \cdot e^{-j \omega t'} \, \mathrm{d} t' \right) \cdot e^{j \omega t}
\end{equation}
Remember that the sum still runs over $n$; the index has merely been absorbed in $\omega = n \omega_0$.
The outer sum is a Riemann sum: $\frac{1}{T_0}$ is substituted by $\frac{\Delta \omega}{2 \pi}$ with $\Delta \omega = \omega_0$. In the limit $T_0 \rightarrow \infty$ (i.e., $\Delta \omega \rightarrow 0$), the sum becomes an integral.
\begin{equation}
\underline{x_{np}}(t) = \underbrace{\frac{1}{2 \pi} \int\limits_{\omega = -\infty}^{\infty} \underbrace{\left( \int\limits_{t' = -\infty}^{\infty} \underline{x_{np}}(t') \cdot e^{-j \omega t'} \, \mathrm{d} t' \right)}_{\text{Fourier transform}} \cdot e^{j \omega t} \, \mathrm{d} \omega}_{\text{Inverse Fourier transform}}
\end{equation}
The inner integral is the \textbf{continuous Fourier transform}, also called only \index{Fourier transform} \emph{Fourier transform}.
\begin{definition}{Fourier Transform}
The \index{continuous Fourier transform} \textbf{continuous Fourier transform} of the function $\underline{x}(t)$ is:
\begin{equation}
\underline{X}(j \omega) = \mathcal{F} \left\{\underline{x}(t)\right\} = \int\limits_{t = -\infty}^{\infty} \underline{x}(t) \cdot e^{-j \omega t} \, \mathrm{d} t
\label{eq:ch02:def_fourier_transform}
\end{equation}
The \index{inverse Fourier transform} \index{inverse continuous Fourier transform} \textbf{inverse (continuous) Fourier transform} is:
\begin{equation}
\underline{x}(t) = \mathcal{F}^{-1} \left\{\underline{X}(j \omega)\right\} = \frac{1}{2 \pi} \int\limits_{\omega = -\infty}^{\infty} \underline{X}(j \omega) \cdot e^{+j \omega t} \, \mathrm{d} \omega
\label{eq:ch02:def_inv_fourier_transform}
\end{equation}
\end{definition}
The Fourier transform $\mathcal{F} \left\{\underline{x}(t)\right\}$ and its inverse $\mathcal{F}^{-1} \left\{\underline{X}(j \omega)\right\}$ both yield functions which depend on $t$ or $\omega$, respectively. This relation is sometimes emphasized by appending $(t)$ or $\left(j \omega\right)$.
\begin{subequations}
\begin{align}
\mathcal{F} \left\{\underline{x}(t)\right\} &= \mathcal{F} \left\{\underline{x}(t)\right\} \left(j \omega\right) \\
\mathcal{F}^{-1} \left\{\underline{X}(j \omega)\right\} &= \mathcal{F}^{-1} \left\{\underline{X}(j \omega)\right\} (t)
\end{align}
\end{subequations}
\subsection{Amplitude and Phase Spectra}
The value-continuous complex frequency variable $j \omega$ of the continuous Fourier transform replaces the value-discrete index $n$ of the Fourier series. Due to this similarity, the constraints for all signals and the \index{spectrum!symmetry rules} \textbf{symmetry rules} for real-valued signals apply analogously.
\begin{itemize}
\item The Fourier transform $\underline{X}(j \omega) \in \mathbb{C}$ is always complex-valued, for both real-valued $\underline{x}(t) = x(t) \in \mathbb{R}$ and complex-valued $\underline{x}(t) \in \mathbb{C}$ signals.
\item The amplitude $|\underline{X}(j \omega)|$ is always a non-negative real number.
\item The phase $\arg\left(\underline{X}(j \omega)\right)$ is a real number from the interval $[-\pi, +\pi]$.
\item For real-valued signals $\underline{x}(t) = x(t) \in \mathbb{R}$, but not for complex-valued signals $\underline{x}(t) \in \mathbb{C}$, the following additional constraints (symmetry rules) apply:
\begin{itemize}
\item The amplitude spectrum $|\underline{X}(j \omega)|$ is an \underline{even function}. It is symmetric with respect to the $y$-axis.
\item The phase spectrum $\arg\left(\underline{X}(j \omega)\right)$ is an \underline{odd function}. It is symmetric with respect to the origin.
\item As a consequence, the phase $\arg\left(\underline{X}(0)\right)$ at $\omega = 0$ must be either $0$ or $\pm \pi$. Note that $+\pi$ is identical to $-\pi$ in the complex plane. The phase carries the sign of the \ac{DC} bias: $\arg\left(\underline{X}(0)\right) = 0$ means positive \ac{DC} bias and $\arg\left(\underline{X}(0)\right) = \pi$ means negative \ac{DC} bias.
\end{itemize}
\end{itemize}
Let's investigate the \index{rectangular function} rectangular function from Figure \ref{fig:ch02:rect_function}. It is defined as:
\begin{equation}
\mathrm{rect}(t) = \begin{cases}
0 & \qquad \text{if } \; |t| > \frac{1}{2}, \\
1 & \qquad \text{if } \; |t| < \frac{1}{2}
\end{cases}
\end{equation}
The function is undefined at $t = \pm \frac{1}{2}$. It is now transformed, i.e., $\underline{x}(t) = \mathrm{rect}(t)$:
\begin{equation}
\underline{X}\left(j \omega\right) = \int\limits_{t = -\infty}^{\infty} \mathrm{rect}(t) \cdot e^{-j \omega t} \, \mathrm{d} t = \mathrm{sinc}\left(\frac{\omega}{2 \pi}\right)
\end{equation}
where $\mathrm{sinc}(t)$ is the \emph{normalized} sinc function.
\begin{attention}
Mathematics and engineering use a slightly different definition of the sinc function.
In mathematics, it is the \index{sinc function!unnormalized} \textbf{\textit{unnormalized} sinc function}:
\begin{equation*}
\mathrm{sinc}(t) = \frac{\sin\left(t\right)}{t}
\end{equation*}
In the context of signal processing and information theory, it is the \index{sinc function!normalized} \textbf{\textit{normalized} sinc function}:
\begin{equation*}
\mathrm{sinc}(t) = \frac{\sin\left(\pi t\right)}{\pi t}
\end{equation*}
In either case, the value at $t = 0$ is defined as:
\begin{equation*}
\mathrm{sinc}(0) = \lim\limits_{t \rightarrow 0} \frac{\sin\left(t\right)}{t} = 1
\end{equation*}
\end{attention}
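The transform pair can be verified numerically. The following sketch (Python/NumPy; grid and test frequency are arbitrary assumptions, not part of the lecture material) approximates the Fourier integral of the rectangular function and compares it with the normalized sinc function:
\begin{verbatim}
import numpy as np

t = np.linspace(-1.0, 1.0, 400001)
x = np.where(np.abs(t) < 0.5, 1.0, 0.0)       # rect(t)

def X(omega):
    # numerical approximation of the Fourier integral
    return np.trapz(x * np.exp(-1j * omega * t), t)

omega = 7.0                                   # arbitrary test frequency
print(X(omega))                               # imaginary part ~ 0
print(np.sinc(omega / (2 * np.pi)))           # np.sinc is the normalized sinc
\end{verbatim}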
The resulting spectra of $\underline{X}\left(j \omega\right)$ can now be drawn. The rectangular function is special: its spectrum has a vanishing imaginary part, $\Im\left\{\underline{X}\left(j \omega\right)\right\} = 0$. Thus, the phase can only be $0$ or $\pm \pi$. This is in fact a property of all functions which are real-valued and even in the time domain, such as the rectangular function considered here or the sinc function.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
height={0.25\textheight},
width=0.6\linewidth,
scale only axis,
xlabel={$\omega$},
ylabel={$|\underline{X}\left(j \omega\right)|$},
%grid style={line width=.6pt, color=lightgray},
%grid=both,
grid=none,
legend pos=north east,
axis y line=middle,
axis x line=middle,
every axis x label/.style={
at={(ticklabel* cs:1.05)},
anchor=north,
},
every axis y label/.style={
at={(ticklabel* cs:1.05)},
anchor=east,
},
xmin=-52,
xmax=52,
ymin=0,
ymax=1.2,
xtick={-50, -40, ..., 50},
ytick={0, 0.25, ..., 1.0}
]
\addplot[red, thick, smooth, domain=-50:50, samples=200] plot (\x,{abs(sinc((1/(2*pi))*\x))});
\end{axis}
\end{tikzpicture}
\caption{Amplitude spectrum of the rectangular function}
\label{fig:ch02:rect_function_ampl_spectrum}
\end{figure}
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
height={0.25\textheight},
width=0.6\linewidth,
scale only axis,
xlabel={$\omega$},
ylabel={$\arg\left(\underline{X}(j \omega)\right)$},
%grid style={line width=.6pt, color=lightgray},
%grid=both,
grid=none,
legend pos=north east,
axis y line=middle,
axis x line=middle,
every axis x label/.style={
at={(ticklabel* cs:1.05)},
anchor=north,
},
every axis y label/.style={
at={(ticklabel* cs:1.05)},
anchor=east,
},
xmin=-52,
xmax=52,
ymin=-4,
ymax=4,
xtick={-50, -40, ..., 50},
ytick={-3.14159, -1.5708, 1.5708, 3.14159},
yticklabels={$-\pi\hspace{0.30cm}$, $-\frac{\pi}{2}$,
$\frac{\pi}{2}$, $\pi\hspace{0.10cm}$},
]
\addplot[red, thick] coordinates {(-50, 0) (-39.48, 0)};
\addplot[red, dashed] coordinates {(-39.48, 0)(-39.48, -3.14159)};
\addplot[red, thick] coordinates {(-39.48, -3.14159) (-19.74, -3.14159)};
\addplot[red, dashed] coordinates {(-19.74, -3.14159) (-19.74, 0)};
\addplot[red, thick] coordinates {(-19.74, 0) (19.74, 0)};
\addplot[red, dashed] coordinates {(19.74, 0) (19.74, 3.14159)};
\addplot[red, thick] coordinates {(19.74, 3.14159) (39.48, 3.14159)};
\addplot[red, dashed] coordinates {(39.48, 3.14159)(39.48, 0)};
\addplot[red, thick] coordinates {(39.48, 0)(50, 0)};
\end{axis}
\end{tikzpicture}
\caption[Phase spectrum of the rectangular function]{Phase spectrum of the rectangular function. Please note that $- \pi$ is equivalent to $+ \pi$.}
\label{fig:ch02:rect_function_phase_spectrum}
\end{figure}
\subsection{Time Domain and Frequency Domain}
So far, you have learnt two representations of a signal.
\begin{itemize}
\item \index{time domain} \textbf{Time domain} -- A signal is a function $\underline{x}(t)$ of the time.
\item \index{frequency domain} \textbf{Frequency domain} -- A signal is a function $\underline{X}(j \omega)$ of the frequency.
\end{itemize}
Both $\underline{x}(t)$ and $\underline{X}(j \omega)$ refer to the same signal.
The frequency-domain representation is obtained from the time-domain representation by a transform. For time-continuous signals, this transform is one of:
\begin{itemize}
\item Fourier series
\item continuous Fourier transform
\end{itemize}
The time-domain representation is obtained by the respective inverse transform.
\begin{definition}{Transform operator}
The operation of a transform between time and frequency domain is written as:
\begin{equation}
\underline{x}(t) \TransformHoriz \underline{X}(j \omega)
\end{equation}
for the transform from time to frequency domain, and vice versa:
\begin{equation}
\underline{X}(j \omega) \InversTransformHoriz \underline{x}(t)
\end{equation}
\end{definition}
\textbf{But what is the purpose of the transforms?}
\begin{figure}[H]
\centering
\begin{tikzpicture}
\node[align=center, minimum width=2.5cm, minimum height=1.5cm] (ProbTD) {\textbf{Problem}\\ in time domain};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, right=5cm of ProbTD] (ProbFD) {\textbf{Problem}\\ in frequency domain};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, below=3cm of ProbTD] (SolTD) {\textbf{Solution}\\ in time domain};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, below=3cm of ProbFD] (SolFD) {\textbf{Solution}\\ in frequency domain};
\draw[-latex, thick] (ProbTD.south) -- node[midway, left, align=right]{Hard to solve} (SolTD.north);
\draw[-latex, thick] (ProbTD.east) -- node[midway, above, align=center]{Transform} (ProbFD.west);
\draw[-latex, thick] (ProbFD.south) -- node[midway, right, align=left]{Easy to solve} (SolFD.north);
\draw[-latex, thick] (SolFD.west) -- node[midway, above, align=center]{Inverse Transform} (SolTD.east);
\end{tikzpicture}
\caption{Explanation of the purpose of transforms}
\end{figure}
\section{Properties of The Continuous Fourier Transform}
\subsection{Energy Signals and Power Signals}
Besides the classification of signals into periodic and non-periodic, signals can be divided into \index{energy signals} \textbf{energy signals} and \index{power signals} \textbf{power signals}.
\begin{definition}{Energy and Power Signals}
\begin{itemize}
\item \textbf{Energy signals} have a finite, positive signal energy $0 < E < \infty$, but their average power is zero, $P = 0$.
\item \textbf{Power signals} have a finite, positive average signal power $0 < P < \infty$, but their signal energy is infinite, $E \rightarrow \infty$.
\end{itemize}
\end{definition}
The \index{average signal power} \textbf{average signal power} $P$ is a measure for the amount of energy transferred per unit time. It is defined by:
\begin{equation}
P = \lim\limits_{T \rightarrow \infty} \frac{1}{T} \int\limits_{-\frac{T}{2}}^{\frac{T}{2}} \left|x(t)\right|^2 \; \mathrm{d} t
\end{equation}
The signal power is connected to the \ac{RMS} value $\hat{x}_{RMS} = \sqrt{P}$, which is often used in electrical engineering:
\begin{equation}
\hat{x}_{RMS} = \lim\limits_{T \rightarrow \infty} \sqrt{ \frac{1}{T} \int\limits_{-\frac{T}{2}}^{\frac{T}{2}} \left|x(t)\right|^2 \; \mathrm{d} t}
\end{equation}
The \index{signal energy} \textbf{signal energy} $E$ is:
\begin{equation}
E = \int\limits_{-\infty}^{\infty} \left|x(t)\right|^2 \; \mathrm{d} t
\end{equation}
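A short numerical illustration (Python/NumPy; the observation window and all parameters are arbitrary example assumptions): a cosine of amplitude $\hat{X}$ is a power signal with $P = \frac{\hat{X}^2}{2}$ and infinite energy, while the rectangular pulse is an energy signal with finite energy and zero average power.
\begin{verbatim}
import numpy as np

X_hat  = 2.0
omega0 = 2 * np.pi * 50.0
T      = 100.0                                # "large" observation window in s
t      = np.linspace(-T / 2, T / 2, 2000001)

x_pow = X_hat * np.cos(omega0 * t)            # power signal (periodic)
x_en  = np.where(np.abs(t) < 0.5, 1.0, 0.0)   # energy signal (rect pulse)

P     = np.trapz(np.abs(x_pow) ** 2, t) / T   # ~ X_hat**2 / 2 = 2.0
E     = np.trapz(np.abs(x_en) ** 2, t)        # ~ 1.0
x_rms = np.sqrt(P)                            # ~ X_hat / sqrt(2)
print(P, E, x_rms)
\end{verbatim}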
Power signals have an infinite signal energy, which is a problem for the Fourier transform: the integral in \eqref{eq:ch02:def_fourier_transform} would not converge. Thus:
\begin{fact}
Every energy signal has a Fourier transform.
\end{fact}
Only some power signals have a Fourier transform. There are distributions which are power signals but nevertheless have a Fourier transform. In particular, all \emph{tempered distributions} have a Fourier transform.
\subsection{Dirac Delta Function}
An important distribution is the \index{Dirac delta function} \textbf{Dirac delta function} $\delta(t)$. The Dirac delta function is zero everywhere except at the origin, where it is an infinitesimally narrow, infinitely high pulse.
\begin{equation}
\delta(t) = \begin{cases}
+\infty & \qquad \text{if } t = 0, \\
0 & \qquad \text{if } t \neq 0
\end{cases}
\label{eq:ch02:dirac_delta}
\end{equation}
It is constrained by
\begin{equation}
\int\limits_{-\infty}^{\infty} \delta(t) \; \mathrm{d} t = 1
\end{equation}
\begin{attention}
The Dirac delta function $\delta(t)$ must not be confused with the Kronecker delta \eqref{eq:ch02:kronecker_delta}. The Dirac delta function operates in continuous space $t \in \mathbb{R}$. The Kronecker delta $\delta_n$ (here one-dimensional) operates in discrete space $n \in \mathbb{Z}$.
\end{attention}
A special property of the Dirac delta function is the \index{Dirac measure} \textbf{Dirac measure} (also known as the sifting property).
\begin{equation}
\int\limits_{-\infty}^{\infty} f(t) \delta(t) \; \mathrm{d} t = f(0)
\label{eq:ch02:dirac_measure}
\end{equation}
Using the Dirac measure, the Fourier transform can be calculated:
\begin{equation}
\mathcal{F} \left\{\delta(t)\right\} = \int\limits_{-\infty}^{\infty} \delta(t) \cdot e^{-j \omega t} \; \mathrm{d} t = 1
\end{equation}
The Fourier transform of the Dirac delta function is the frequency-independent constant $1$.
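Numerically, the Dirac delta function can only be approximated, e.g., by a narrow rectangular pulse of unit area. The following sketch (Python/NumPy; pulse width and test function are arbitrary assumptions) illustrates the Dirac measure and the flat spectrum:
\begin{verbatim}
import numpy as np

t   = np.linspace(-1.0, 1.0, 2000001)
eps = 1e-3
# narrow rectangular pulse of area 1 as an approximation of delta(t)
delta_approx = np.where(np.abs(t) < eps / 2, 1.0 / eps, 0.0)

f = lambda tau: np.cos(3.0 * tau) + tau ** 2        # arbitrary smooth test function
print(np.trapz(f(t) * delta_approx, t))             # ~ f(0) = 1.0 (Dirac measure)
print(np.trapz(delta_approx * np.exp(-1j * 5.0 * t), t))   # ~ 1 (flat spectrum)
\end{verbatim}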
\subsection{Basic Properties}
All properties of the Fourier transform can be proven using the definition of the Fourier transform \eqref{eq:ch02:def_fourier_transform}.
\subsubsection{Linearity}
%\begin{equation}
%\end{equation}
\begin{definition}{Linearity of the Fourier transform}
\begin{equation}
\mathcal{F}\left\{\underline{a} \cdot \underline{f}(t) + \underline{b} \cdot \underline{g}(t)\right\} = \underline{a} \cdot \mathcal{F}\left\{\underline{f}(t)\right\} + \underline{b} \cdot \mathcal{F}\left\{\underline{g}(t)\right\}
\label{eq:ch02:op_lin}
\end{equation}
where
\begin{itemize}
\item $\underline{a} \in \mathbb{C}$ and $\underline{b} \in \mathbb{C}$ are complex numbers and
\item $\underline{f}(t)$ and $\underline{g}(t)$ are Fourier-transformable functions.
\end{itemize}
\end{definition}
\subsubsection{Differentiation and Integration}
\begin{definition}{Differentiation of the Fourier transform}
\begin{equation}
\mathcal{F}\left\{\frac{\mathrm{d}^n}{\mathrm{d} t^n} \underline{f}(t)\right\} = \left(j \omega\right)^n \underbrace{\underline{F} \left(j \omega\right)}_{= \mathcal{F}\left\{\underline{f}(t)\right\}}
\label{eq:ch02:op_diff}
\end{equation}
\end{definition}
\begin{definition}{Integration of the Fourier transform}
\begin{equation}
\mathcal{F}\left\{\int\limits_{t'= -\infty}^{t} \underline{f}(t') \, \mathrm{d} t' \right\} = \frac{1}{j \omega} \underbrace{\underline{F} \left(j \omega\right)}_{= \mathcal{F}\left\{\underline{f}(t)\right\}}
\label{eq:ch02:op_int}
\end{equation}
This form assumes a vanishing \ac{DC} component, $\underline{F}(0) = 0$; otherwise, an additional term $\pi \underline{F}(0) \delta(\omega)$ appears.
\end{definition}
\begin{excursus}{Network analysis of reactive electrical circuits}
Linear, reactive electrical networks are analysed using the Fourier transform.
For example, voltage and current at a capacitance have the following relation in the time domain:
\begin{equation}
u(t) = \frac{1}{C} \int i(t) \, \mathrm{d} t
\end{equation}
The expression in complex-valued phasors (frequency domain) is:
\begin{equation}
\underline{U} = \underline{Z}_C \cdot \underline{I}
\end{equation}
Using the Fourier transform, the impedance $\underline{Z}_C$ can be determined to:
\begin{equation}
\underline{Z}_C = \frac{1}{j \omega C}
\end{equation}
The calculation is analogous for inductances. The voltage-current relation in the time domain is:
\begin{equation}
u(t) = L \cdot \frac{\mathrm{d}}{\mathrm{d} t} i(t)
\end{equation}
The complex-valued impedance $\underline{Z}_L$ (frequency domain) is:
\begin{equation}
\underline{Z}_L = j \omega L
\end{equation}
\end{excursus}
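The impedances can be evaluated directly for concrete component values. A small numerical sketch (Python/NumPy; all component values and the frequency are arbitrary example assumptions):
\begin{verbatim}
import numpy as np

f     = 50.0                   # example frequency in Hz
omega = 2 * np.pi * f
C     = 10e-6                  # 10 uF capacitance
L     = 0.1                    # 100 mH inductance

Z_C = 1.0 / (1j * omega * C)   # capacitive impedance
Z_L = 1j * omega * L           # inductive impedance

print(abs(Z_C), np.degrees(np.angle(Z_C)))   # ~ 318 Ohm, phase -90 deg
print(abs(Z_L), np.degrees(np.angle(Z_L)))   # ~ 31.4 Ohm, phase +90 deg
\end{verbatim}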
\subsubsection{Multiplication}
\begin{definition}{Convolution theorem}
A multiplication in the time-domain becomes a convolution in the frequency domain.
\begin{equation}
\mathcal{F}\left\{ \underline{f}(t) \cdot \underline{g}(t) \right\} = \frac{1}{2 \pi} \mathcal{F}\left\{\underline{f}(t)\right\} * \mathcal{F}\left\{\underline{g}(t)\right\}
\label{eq:ch02:op_mult}
\end{equation}
\end{definition}
\begin{excursus}{Convolution}
The convolution is defined to:
\begin{equation}
f(t) * g(t) = \left(f * g\right) (t) = \int_{\tau = -\infty}^{\infty} f(\tau) g(t - \tau) \, \mathrm{d} \tau
\label{eq:ch02:def_convolution}
\end{equation}
\end{excursus}
\subsubsection{Time Shift}
%Let
%\begin{equation}
% h(t) = \underline{f}(t - t_0)
%\end{equation}
\begin{definition}{Translation}
\begin{equation}
\mathcal{F}\left\{\underline{f}(t - t_0)\right\} = e^{-j t_0 \omega} \cdot \underbrace{\underline{F} \left(j \omega\right)}_{= \mathcal{F}\left\{\underline{f}(t)\right\}}
\label{eq:ch02:op_time_shift}
\end{equation}
where
\begin{itemize}
\item $t_0 \in \mathbb{R}$ is a real number and
\item $\underline{f}(t)$ is a Fourier-transformable function.
\end{itemize}
\end{definition}
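The time shift property can be verified numerically. The following sketch (Python/NumPy; the Gaussian test signal, the shift and the test frequency are arbitrary assumptions) compares both sides of \eqref{eq:ch02:op_time_shift}:
\begin{verbatim}
import numpy as np

dt = 1e-3
t  = np.arange(-4.0, 4.0, dt)
f  = np.exp(-t ** 2)                  # arbitrary Fourier-transformable signal
t0 = 0.7
f_shift = np.exp(-(t - t0) ** 2)      # f(t - t0)

def F(x, omega):                      # numerical Fourier transform at one frequency
    return np.sum(x * np.exp(-1j * omega * t)) * dt

omega = 3.0
print(F(f_shift, omega))                            # left-hand side
print(np.exp(-1j * t0 * omega) * F(f, omega))       # right-hand side
\end{verbatim}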
\subsection{Duality}
\begin{definition}{Duality}
Suppose $\underline{g}(t)$ has the Fourier transform $\underline{G}\left(j \omega\right)$, i.e., $\underline{g} \TransformHoriz \underline{G}$. The Fourier transform of $\underline{G}(t)$, i.e., of the spectrum re-interpreted as a function of time, is then:
\begin{equation}
\mathcal{F}\left\{\underline{G}(t)\right\} = 2 \pi \cdot \underline{g} \left(- \omega\right)
\label{eq:ch02:op_duality}
\end{equation}
\end{definition}
An example of the duality is the frequency shift. We already know the Fourier transform of a time-shifted signal \eqref{eq:ch02:op_time_shift}. Its dual is the frequency shift (modulation):
\begin{equation}
\mathcal{F}\left\{e^{j \omega_0 t} \underline{f}(t)\right\} = \underbrace{\underline{F} \left(j (\omega - \omega_0)\right)}_{= \mathcal{F}\left\{\underline{f}(t)\right\} \left( j (\omega - \omega_0) \right)}
\label{eq:ch02:op_freq_shift}
\end{equation}
Another example is the convolution in the time domain. Due to the duality, it becomes a multiplication in the frequency domain.
\begin{equation}
\mathcal{F}\left\{ \underline{f}(t) * \underline{g}(t) \right\} = \mathcal{F}\left\{\underline{f}(t)\right\} \cdot \mathcal{F}\left\{\underline{g}(t)\right\}
\label{eq:ch02:op_conv}
\end{equation}
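This relation can be checked numerically as well. In the following sketch (Python/NumPy; signals, grid and test frequency are arbitrary assumptions), two rectangular pulses are convolved and the Fourier transform of the result is compared with the product of the individual transforms:
\begin{verbatim}
import numpy as np

dt = 1e-3
t  = np.arange(-2.0, 2.0, dt)
f  = np.where(np.abs(t) < 0.5, 1.0, 0.0)    # rect(t)
g  = f.copy()

# continuous convolution approximated by a discrete sum times dt;
# the full result lives on its own, longer time axis
conv   = np.convolve(f, g) * dt             # triangular pulse
t_conv = np.arange(conv.size) * dt + 2 * t[0]

def F(x, tx, omega):                        # numerical Fourier transform
    return np.sum(x * np.exp(-1j * omega * tx)) * dt

omega = 5.0
print(F(conv, t_conv, omega))               # Fourier transform of f * g
print(F(f, t, omega) * F(g, t, omega))      # product of the transforms
\end{verbatim}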
\begin{figure}[H]
\centering
\begin{tikzpicture}
\node[align=center, minimum width=2.5cm, minimum height=1.5cm] (TD1) {$\underline{f}(t) \cdot \underline{g}(t)$};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, right=3.5cm of TD1] (TD2) {$\underline{f}(t) * \underline{g}(t)$};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, below=2cm of TD1] (FD1) {$\frac{1}{2 \pi} \left(\underline{F}\left(j \omega\right) * \underline{G}\left(j \omega\right)\right)$};
\node[align=center, minimum width=2.5cm, minimum height=1.5cm, below=2cm of TD2] (FD2) {$\underline{F}\left(j \omega\right) \cdot \underline{G}\left(j \omega\right)$};
\node[align=right, anchor=east, left=3cm of TD1] (LabelTD) {\textbf{Time domain}};
\node[align=right, anchor=east, below=2cm of LabelTD] (LabelFD) {\textbf{Frequency domain}};
\node[align=right, above=1cm of TD1] (Func1) {\textbf{Function 1}};
\node[align=right, above=1cm of TD2] (Func2) {\textbf{Function 2}};
%\draw (TD1) node[midway, align=right, rotate=-90]{$\TransformHoriz$} (FD1);
%\draw (TD2) node[midway, align=right, rotate=-90]{$\TransformHoriz$} (FD2);
\draw[o-*, thick] (TD1.south) -- (FD1.north);
\draw[o-*, thick] (TD2.south) -- (FD2.north);
\draw[thick] (TD1.south east) -- (FD2.north west);
\draw[thick] (TD2.south west) -- (FD1.north east);
\end{tikzpicture}
\caption{Duality}
\end{figure}
The duality also affects the units of the time variable $t$ and the frequency variable $\omega$: they must be inverse to each other. If $t$ is given in seconds, $\omega$ must be given in \si{1/s}.
\section{\acs{LTI} Systems}
\subsection{Transfer Function}
\subsection{Impulse Response}
% Convolution
\subsection{Poles and Zeroes}
\printbibliography[heading=subbibliography]
\end{refsection}