\chapter{Theoretical Background}%
\label{chapter:theoretical_background}
In this chapter, the theoretical background necessary to understand this
work is given.
First, the notation used throughout this thesis is clarified.
Then, the physical aspects are detailed: the modulation scheme and the
channel model.
A short introduction to channel coding with binary linear codes, and
\ac{LDPC} codes in particular, is given.
The established methods for decoding \ac{LDPC} codes are briefly explained.
Lastly, the optimization methods utilized are described.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Notation}
\label{sec:theo:Notation}
%
% TODOs
%
\begin{itemize}
\item General remarks on notation (matrices, \ldots)
\item Probabilistic quantities (random variables, \acp{PDF}, pdfs vs pmfs vs cdfs, \ldots)
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Preliminaries: Channel Model and Modulation}
\label{sec:theo:Preliminaries: Channel Model and Modulation}
%
% TODOs
%
\begin{itemize}
\item \Ac{AWGN}
\item \Ac{BPSK}
\end{itemize}
%
% Figure showing notation for entire coding / decoding process
%
\tikzset{box/.style={rectangle, minimum width=1.5cm, minimum height=0.7cm,
rounded corners=0.1cm, text centered, draw=black, fill=KITgreen!80}}
\begin{figure}[htpb]
\centering
\begin{tikzpicture}[scale=1, transform shape]
\node (in) {$\boldsymbol{c}$};
\node[box, right=0.5cm of in] (bpskmap) {Mapper};
\node[right=1.5cm of bpskmap,
draw, circle, inner sep=0pt, minimum size=0.5cm] (add) {$+$};
\node[below=0.5cm of add] (noise) {$\boldsymbol{z}$};
\node[box, right=1.5cm of add] (decoder) {Decoder};
\node[box, right=1.5cm of decoder] (demapper) {Demapper};
\node[right=0.5cm of demapper] (out) {$\boldsymbol{\hat{c}}$};
\node at ($(bpskmap.east)!0.5!(add.west) + (0,0.3cm)$) {$\boldsymbol{x}$};
\node at ($(add.east)!0.5!(decoder.west) + (0,0.3cm)$) {$\boldsymbol{y}$};
\node at ($(decoder.east)!0.5!(demapper.west) + (0,0.3cm)$) {$\boldsymbol{\hat{x}}$};
\draw[->] (in) -- (bpskmap);
\draw[->] (bpskmap) -- (add);
\draw[->] (add) -- (decoder);
\draw[->] (noise) -- (add);
\draw[->] (decoder) -- (demapper);
\draw[->] (demapper) -- (out);
\end{tikzpicture}
\caption{Overview of notation}
\label{fig:notation}
\end{figure}
\todo{Note about $\tilde{\boldsymbol{c}}$ (and maybe $\tilde{\boldsymbol{x}}$?)}
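To make the notation of Figure~\ref{fig:notation} concrete, the following
short sketch simulates the transmission chain, assuming the common
\ac{BPSK} mapping $x_i = 1 - 2 c_i$ and real-valued \ac{AWGN}; the
mapping convention, block length and noise level are assumptions of this
sketch, not yet fixed by this chapter:
\begin{verbatim}
import numpy as np

# Sketch of the chain in the notation figure. Assumptions: BPSK
# mapping x = 1 - 2c (0 -> +1, 1 -> -1) and real-valued AWGN with
# standard deviation sigma; block length and sigma are arbitrary.
rng = np.random.default_rng(0)
sigma = 0.8
c = rng.integers(0, 2, size=16)            # code bits c
x = 1 - 2 * c                              # BPSK symbols x
z = sigma * rng.standard_normal(c.size)    # noise realization z
y = x + z                                  # channel output y
c_hat = (y < 0).astype(int)                # hard demapping, no decoding
\end{verbatim}
For this mapping, the channel log-likelihood ratios take the well-known
form $2 y_i / \sigma^2$.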
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Channel Coding with LDPC Codes}
\label{sec:theo:Channel Coding with LDPC Codes}
\begin{itemize}
\item Introduction
\item Binary linear codes
\item \Ac{LDPC} codes (especially $i$, $j$, parity-check matrix $\boldsymbol{H}$, $N\left( j \right) $ \& $N\left( i \right) $, etc.; see the example after this list)
\end{itemize}
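As a small example of these quantities, the following code builds a toy
parity-check matrix and its neighborhoods, assuming the common convention
that $N\left( j \right)$ collects the variable nodes taking part in check
$j$ and $N\left( i \right)$ the checks involving variable node $i$ (the
matrix below is an assumed example, not an actual \ac{LDPC} matrix):
\begin{verbatim}
import numpy as np

# Toy parity-check matrix (assumed example, not an actual LDPC code):
# rows are check nodes j, columns are variable nodes i.
H = np.array([[1, 1, 0, 1, 0, 0],
              [0, 1, 1, 0, 1, 0],
              [1, 0, 0, 0, 1, 1]])

# N(j): variable nodes taking part in check j (nonzeros of row j).
N_check = {j: set(np.flatnonzero(H[j])) for j in range(H.shape[0])}
# N(i): check nodes constraining variable i (nonzeros of column i).
N_var = {i: set(np.flatnonzero(H[:, i])) for i in range(H.shape[1])}

# c is a codeword iff its syndrome H c is zero over GF(2).
c = np.array([1, 1, 1, 0, 0, 1])
print((H @ c) % 2)   # -> [0 0 0]
\end{verbatim}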
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Decoding LDPC Codes using Belief Propagation}
\label{sec:theo:Decoding LDPC Codes using Belief Propagation}
\begin{itemize}
\item Introduction to message passing
\item Overview of \ac{BP} algorithm (see the sketch after this list)
\end{itemize}
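As a compact sketch of the message-passing structure, the following code
implements the sum-product update rules on a small, dense parity-check
matrix; the dense representation and the log-likelihood ratio convention
$\ell_i = \log\left( p\left( y_i \mid c_i = 0 \right)
/ p\left( y_i \mid c_i = 1 \right) \right)$ are assumptions of this
sketch:
\begin{verbatim}
import numpy as np

# Sum-product sketch. Assumptions: H is a small dense {0,1} array
# (rows: check nodes j, columns: variable nodes i), llr holds the
# channel LLRs with llr_i = log p(y_i|c_i=0) / p(y_i|c_i=1).
def bp_decode(H, llr, max_iters=50):
    m, n = H.shape
    v2c = np.tile(llr, (m, 1)) * H        # variable-to-check messages
    c_hat = (llr < 0).astype(int)         # initial hard decision
    for _ in range(max_iters):
        # Check-node update (tanh rule), using all *other* incoming
        # messages of check node j.
        c2v = np.zeros((m, n))
        for j in range(m):
            nbrs = np.flatnonzero(H[j])   # N(j)
            for i in nbrs:
                t = np.prod(np.tanh(v2c[j, nbrs[nbrs != i]] / 2))
                c2v[j, i] = 2 * np.arctanh(np.clip(t, -0.999999, 0.999999))
        # Variable-node update: channel LLR plus all check messages.
        total = llr + c2v.sum(axis=0)
        c_hat = (total < 0).astype(int)   # hard decision per bit
        if not np.any((H @ c_hat) % 2):   # zero syndrome: stop early
            break
        v2c = (total - c2v) * H           # extrinsic: exclude own message
    return c_hat
\end{verbatim}
The subtraction \texttt{total - c2v} implements the extrinsic principle:
the message a variable node returns to a check node excludes the
contribution it received from that very check node.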
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Optimization Methods}
\label{sec:theo:Optimization Methods}
\begin{itemize}
\item \ac{ADMM}
\item proximal decoding
\end{itemize}
Generally, any linear program \todo{Acronym} can be expressed in \textit{standard form}%
\footnote{The inequality $\boldsymbol{x} \ge \boldsymbol{0}$ is to be
interpreted componentwise.}
\cite[Sec. 1.1]{intro_to_lin_opt_book}:%
%
\begin{alignat}{3}
\begin{alignedat}{3}
\text{minimize }\hspace{2mm} && \boldsymbol{\gamma}^\text{T} \boldsymbol{x} \\
\text{subject to }\hspace{2mm} && \boldsymbol{A}\boldsymbol{x} & = \boldsymbol{b} \\
&& \boldsymbol{x} & \ge \boldsymbol{0}.
\end{alignedat}
\label{eq:theo:admm_standard}
\end{alignat}%
%
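For illustration, such a standard-form instance can be solved numerically,
e.g.\ with SciPy's \texttt{linprog} (the problem data below is an assumed
toy example):
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

# Toy standard-form LP: minimize gamma^T x  s.t.  A x = b, x >= 0.
gamma = np.array([1.0, 2.0, 0.0])
A = np.array([[1.0, 1.0, 1.0]])
b = np.array([1.0])

res = linprog(gamma, A_eq=A, b_eq=b, bounds=(0, None))
print(res.x, res.fun)   # -> [0. 0. 1.] 0.0
\end{verbatim}
%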
A technique called \textit{Lagrangian relaxation}%
\todo{Citation needed}
can then be applied: some of the
constraints are moved into the objective function itself, weighted by the
newly introduced multipliers $\boldsymbol{\lambda}$. A new, relaxed problem is formulated:
%
\begin{align}
\begin{aligned}
\text{minimize }\hspace{2mm} & \boldsymbol{\gamma}^\text{T}\boldsymbol{x}
+ \boldsymbol{\lambda}^\text{T}\left(\boldsymbol{b}
- \boldsymbol{A}\boldsymbol{x} \right) \\
\text{subject to }\hspace{2mm} & \boldsymbol{x} \ge \boldsymbol{0},
\end{aligned}
\label{eq:theo:admm_relaxed}
\end{align}%
%
the new objective function being the \textit{Lagrangian}%
%
\begin{align*}
\mathcal{L}\left( \boldsymbol{x}, \boldsymbol{b}, \boldsymbol{\lambda} \right)
= \boldsymbol{\gamma}^\text{T}\boldsymbol{x}
+ \boldsymbol{\lambda}^\text{T}\left(\boldsymbol{b}
- \boldsymbol{A}\boldsymbol{x} \right)
.\end{align*}%
This problem is not directly equivalent to the original one, as its
solution now depends on the choice of the \textit{Lagrange multipliers}
$\boldsymbol{\lambda}$.
However, for any choice of $\boldsymbol{\lambda}$, the optimal objective
of the relaxed problem (\ref{eq:theo:admm_relaxed}) is a lower bound on
the optimal objective of the original problem (\ref{eq:theo:admm_standard}),
since the term $\boldsymbol{\lambda}^\text{T}\left( \boldsymbol{b}
- \boldsymbol{A}\boldsymbol{x} \right)$ vanishes for every
$\boldsymbol{x}$ that is feasible for the original problem
(\textit{weak duality}) \cite[Sec. 4.1]{intro_to_lin_opt_book}:%
%
\begin{align*}
\min_{\substack{\boldsymbol{x} \ge \boldsymbol{0} \\ \phantom{a}}}
\mathcal{L}\left( \boldsymbol{x}, \boldsymbol{b}, \boldsymbol{\lambda}
\right)
\le
\min_{\substack{\boldsymbol{x} \ge \boldsymbol{0} \\ \boldsymbol{A}\boldsymbol{x}
= \boldsymbol{b}}}
\boldsymbol{\gamma}^\text{T}\boldsymbol{x}
.\end{align*}
%
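As a small illustration, consider the toy instance from the numerical
example above, $\boldsymbol{\gamma} = \left( 1, 2, 0 \right)^\text{T}$,
$\boldsymbol{A} = \left( 1, 1, 1 \right)$ and $b = 1$, whose optimum is
$0$, attained at $\boldsymbol{x} = \left( 0, 0, 1 \right)^\text{T}$.
Writing $\mathcal{L}\left( \boldsymbol{x}, b, \lambda \right)
= \lambda b + \left( \boldsymbol{\gamma}
- \boldsymbol{A}^\text{T} \lambda \right)^\text{T} \boldsymbol{x}$ shows
that its minimum over $\boldsymbol{x} \ge \boldsymbol{0}$ equals
$\lambda$ whenever $\boldsymbol{\gamma} - \boldsymbol{A}^\text{T} \lambda
\ge \boldsymbol{0}$ (that is, for $\lambda \le 0$) and $-\infty$
otherwise, so every $\lambda \le 0$ yields a lower bound on the optimum.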
Furthermore, for linear programs with a finite optimal value,
\textit{strong duality} holds.
\todo{Citation needed}
This means that the lower bound is not merely an approximation; the
tightest choice of $\boldsymbol{\lambda}$ attains the optimal value itself:
%
\begin{align*}
\max_{\boldsymbol{\lambda}} \, \min_{\boldsymbol{x} \ge \boldsymbol{0}}
\mathcal{L}\left( \boldsymbol{x}, \boldsymbol{b}, \boldsymbol{\lambda} \right)
= \min_{\substack{\boldsymbol{x} \ge \boldsymbol{0} \\ \boldsymbol{A}\boldsymbol{x}
= \boldsymbol{b}}}
\boldsymbol{\gamma}^\text{T}\boldsymbol{x}
.\end{align*}
%
In other words, with the optimal choice of $\boldsymbol{\lambda}$,
the optimal objectives of the problems (\ref{eq:theo:admm_relaxed})
and (\ref{eq:theo:admm_standard}) have the same value.
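In the toy instance above, the tightest lower bound is attained at
$\lambda_\text{opt} = 0$, where it indeed coincides with the optimal
objective $0$ of the original problem.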
Thus, we can define the \textit{dual problem} as the search for the tightest lower bound:%
%
\begin{align}
\text{maximize }\hspace{2mm} & \min_{\boldsymbol{x} \ge \boldsymbol{0}} \mathcal{L}
\left( \boldsymbol{x}, \boldsymbol{b}, \boldsymbol{\lambda} \right)
\label{eq:theo:dual}
,\end{align}
%
and recover the optimal point $\boldsymbol{x}_{\text{opt}}$
(the solution to problem (\ref{eq:theo:admm_standard}))
from the dual optimal point $\boldsymbol{\lambda}_\text{opt}$
(the solution to problem (\ref{eq:theo:dual}))
by computing \cite[Sec. 2.1]{admm_distr_stats}%
%
\begin{align}
\boldsymbol{x}_{\text{opt}} = \argmin_{\boldsymbol{x}}
\mathcal{L}\left( \boldsymbol{x}, \boldsymbol{b},
\boldsymbol{\lambda}_{\text{opt}} \right)
\label{eq:theo:admm_obtain_primal}
.\end{align}
%
The dual problem can then be solved using \textit{dual ascent}: starting with an
initial estimate of $\boldsymbol{\lambda}$, calculate an estimate for $\boldsymbol{x}$
using equation (\ref{eq:theo:admm_obtain_primal}); then, update $\boldsymbol{\lambda}$
with a gradient ascent step on the dual function, whose gradient with
respect to $\boldsymbol{\lambda}$ is the residual
$\boldsymbol{b} - \boldsymbol{A}\boldsymbol{x}$ \cite[Sec. 2.1]{admm_distr_stats}:%
%
\begin{align*}
\boldsymbol{x} &\leftarrow \argmin_{\boldsymbol{x}} \mathcal{L}\left(
\boldsymbol{x}, \boldsymbol{b}, \boldsymbol{\lambda} \right) \\
\boldsymbol{\lambda} &\leftarrow \boldsymbol{\lambda}
+ \alpha\left( \boldsymbol{b} - \boldsymbol{A}\boldsymbol{x} \right),
\hspace{5mm} \alpha > 0
.\end{align*}
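The following short numerical sketch illustrates these two updates.
For the purely linear objective of problem (\ref{eq:theo:admm_standard})
the inner minimization may be unbounded, so the sketch assumes a strictly
convex quadratic objective $\frac{1}{2} \lVert \boldsymbol{x} \rVert^2
+ \boldsymbol{\gamma}^\text{T} \boldsymbol{x}$ and drops the constraint
$\boldsymbol{x} \ge \boldsymbol{0}$, so that the inner $\argmin$ has the
closed form $\boldsymbol{x} = \boldsymbol{A}^\text{T} \boldsymbol{\lambda}
- \boldsymbol{\gamma}$; the problem data is an arbitrary toy example:
\begin{verbatim}
import numpy as np

# Dual-ascent sketch. Assumptions: strictly convex quadratic
# objective 1/2 ||x||^2 + gamma^T x, no nonnegativity constraint,
# so the inner argmin has the closed form x = A^T lambda - gamma.
rng = np.random.default_rng(0)
A = rng.standard_normal((3, 5))
b = rng.standard_normal(3)
gamma = rng.standard_normal(5)

lam = np.zeros(3)                        # initial multiplier estimate
alpha = 1.0 / np.linalg.norm(A, 2) ** 2  # step size, small enough here

for _ in range(1000):
    x = A.T @ lam - gamma                # x-update: minimize Lagrangian
    lam += alpha * (b - A @ x)           # ascent step along dual gradient

print(np.linalg.norm(A @ x - b))         # constraint residual -> ~0
\end{verbatim}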