\chapter{\acs{LP} Decoding using \acs{ADMM}}%
\label{chapter:lp_dec_using_admm}
This chapter is concerned with \ac{LP} decoding, the reformulation of the
decoding problem as a linear program.
More specifically, the \ac{LP} decoding problem is solved using \ac{ADMM}.
First, the general field of \ac{LP} decoding is introduced.
The application of \ac{ADMM} to the decoding problem is explained and some
notable implementation details are mentioned.
Finally, the behavior of the algorithm is examined based on simulation
results.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{LP Decoding}%
\label{sec:lp:LP Decoding}
\Ac{LP} decoding is a subject area introduced by Feldman et al.
\cite{feldman_paper}. They reframe the decoding problem as an
\textit{integer linear program} and subsequently present two relaxations into
\textit{linear programs}: one representing an \ac{LP} formulation of exact
\ac{ML} decoding, and one approximating it with a more manageable
representation.
To solve the resulting linear program, various optimization methods can be
used (see for example \cite{alp}, \cite{interior_point},
\cite{efficient_lp_dec_admm}, \cite{pdd}).
Feldman et al. begin by looking at the \ac{ML} decoding problem%
\footnote{They assume that all codewords are equally likely to be transmitted,
making the \ac{ML} and \ac{MAP} decoding problems equivalent.}%
%
\begin{align}
\hat{\boldsymbol{c}}_{\text{\ac{ML}}} = \argmax_{\boldsymbol{c} \in \mathcal{C}}
f_{\boldsymbol{Y} \mid \boldsymbol{C}}
\left( \boldsymbol{y} \mid \boldsymbol{c} \right)%
\label{eq:lp:ml}
.\end{align}%
%
Assuming a memoryless channel, equation (\ref{eq:lp:ml}) can be rewritten in terms
of the \acp{LLR} $\gamma_i$ \cite[Sec. 2.5]{feldman_thesis}:%
%
\begin{align*}
\hat{\boldsymbol{c}}_{\text{\ac{ML}}} = \argmin_{\boldsymbol{c}\in\mathcal{C}}
\sum_{i=1}^{n} \gamma_i c_i,%
\hspace{5mm} \gamma_i = \ln\left(
\frac{f_{Y_i | C_i} \left( y_i \mid c_i = 0 \right) }
{f_{Y_i | C_i} \left( y_i \mid c_i = 1 \right) } \right)
.\end{align*}
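%
As a concrete illustration, assume (this channel model is not fixed by the
text above) BPSK transmission over an AWGN channel with noise variance
$\sigma^2$ and the mapping $0 \mapsto +1$, $1 \mapsto -1$; the \acp{LLR} then
reduce to $\gamma_i = 2 y_i / \sigma^2$.
A minimal sketch of their computation:
%
\begin{lstlisting}[language=Python]
import numpy as np

# Sketch: LLRs for BPSK (0 -> +1, 1 -> -1) over an AWGN channel (assumed).
rng = np.random.default_rng(0)
sigma2 = 0.5                                  # noise variance (assumed)
c = rng.integers(0, 2, size=8)                # transmitted bits
y = (1.0 - 2.0 * c) + rng.normal(scale=np.sqrt(sigma2), size=8)
gamma = 2.0 * y / sigma2                      # gamma_i = ln f(y|0) - ln f(y|1)
\end{lstlisting}
%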
%
The authors propose using the following cost function%
\footnote{In this context, \textit{cost function} and \textit{objective function}
have the same meaning.}
for the \ac{LP} decoding problem:%
%
\begin{align*}
g\left( \boldsymbol{c} \right) = \sum_{i=1}^{n} \gamma_i c_i
= \boldsymbol{\gamma}^\text{T}\boldsymbol{c}
.\end{align*}
%
With this cost function, the exact integer linear program formulation of \ac{ML}
decoding becomes%
%
\begin{align*}
\text{minimize }\hspace{2mm} & \boldsymbol{\gamma}^\text{T}\boldsymbol{c} \\
\text{subject to }\hspace{2mm} &\boldsymbol{c} \in \mathcal{C}
.\end{align*}%
%
%\todo{$\boldsymbol{c}$ or some other variable name? e.g. $\boldsymbol{c}^{*}$.
%Especially for the continuous variable in LP decoding}
As solving integer linear programs is NP-hard in general, this decoding problem
has to be approximated by a problem with looser constraints.
To this end, a technique called \textit{relaxation} is applied:
the constraints are loosened, thereby broadening the considered domain
(e.g., by lifting the integer requirement).
First, the authors present an equivalent \ac{LP} formulation of exact \ac{ML}
decoding, redefining the constraints in terms of the \textit{codeword polytope}
%
\begin{align*}
\text{poly}\left( \mathcal{C} \right) = \left\{
\sum_{\boldsymbol{c} \in \mathcal{C}} \alpha_{\boldsymbol{c}} \boldsymbol{c}
\text{ : } \alpha_{\boldsymbol{c}} \ge 0,
\sum_{\boldsymbol{c} \in \mathcal{C}} \alpha_{\boldsymbol{c}} = 1 \right\}
,\end{align*} %
%
which represents the \textit{convex hull} of all possible codewords,
i.e., the convex set of linear combinations of all codewords.
This corresponds to simply lifting the integer requirement.
However, since the number of constraints needed to characterize the codeword
polytope is exponential in the code length, this formulation is relaxed further.
By observing that each check node defines its own local single parity-check
code, and, thus, its own \textit{local codeword polytope},
the \textit{relaxed codeword polytope} $\overline{Q}$ is defined as the intersection of all
local codeword polytopes.
This consideration leads to constraints that can be described as follows
\cite[Sec. II, A]{efficient_lp_dec_admm}:%
%
\begin{align*}
\boldsymbol{T}_j \tilde{\boldsymbol{c}} \in \mathcal{P}_{d_j}
\hspace{5mm}\forall j\in \mathcal{J}
,\end{align*}%
%
where $\mathcal{P}_{d_j}$ is the \textit{check polytope}, i.e., the convex hull of all
binary vectors of length $d_j$ with even parity%
\footnote{Essentially $\mathcal{P}_{d_j}$ is the set of vectors that satisfy
parity-check $j$, but extended to the continuous domain.},
and $\boldsymbol{T}_j$ is the \textit{transfer matrix}, which selects the
neighboring variable nodes
of check node $j$ (i.e., the relevant components of $\tilde{\boldsymbol{c}}$
for parity-check $j$).
For example, if the $j$th row of the parity-check matrix
$\boldsymbol{H}$ were $\boldsymbol{h}_j =
\begin{bmatrix} 0 & 1 & 0 & 1 & 0 & 1 & 0 \end{bmatrix}$,
the transfer matrix would be \cite[Sec. II, A]{efficient_lp_dec_admm}
%
\begin{align*}
\boldsymbol{T}_j =
\begin{bmatrix}
0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 \\
\end{bmatrix}
.\end{align*}%
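%
The transfer matrices can be constructed directly from the rows of
$\boldsymbol{H}$.
The following is a minimal sketch of this construction (the function name and
the dense representation are illustrative; an actual implementation would
likely use a sparse format):
%
\begin{lstlisting}[language=Python]
import numpy as np

def transfer_matrix(h_row):
    """Build T_j from the j-th row of H: one row per neighboring VN."""
    (idx,) = np.nonzero(h_row)                # neighboring variable nodes
    T = np.zeros((len(idx), len(h_row)))
    T[np.arange(len(idx)), idx] = 1.0
    return T

# Reproduces the example above.
print(transfer_matrix(np.array([0, 1, 0, 1, 0, 1, 0])))
\end{lstlisting}
%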
%
In figure \ref{fig:lp:poly}, the two relaxations are compared for an
exemplary code, which is described by the generator and parity-check matrices%
%
\begin{align}
\boldsymbol{G} =
\begin{bmatrix}
0 & 1 & 1
\end{bmatrix} \label{eq:lp:example_code_def_gen} \\[1em]
\boldsymbol{H} =
\begin{bmatrix}
1 & 1 & 1\\
0 & 1 & 1
\end{bmatrix} \label{eq:lp:example_code_def_par}
\end{align}%
%
and has only two possible codewords:
%
\begin{align*}
\mathcal{C} = \left\{ \begin{bmatrix} 0 \\ 0 \\ 0 \end{bmatrix},
\begin{bmatrix} 0 \\ 1 \\ 1 \end{bmatrix} \right\}
.\end{align*}
%
Figure \ref{fig:lp:poly:exact_ilp} shows the domain of exact \ac{ML} decoding.
The first relaxation onto the codeword polytope $\text{poly}\left( \mathcal{C} \right) $
is shown in figure \ref{fig:lp:poly:exact};
this expresses the constraints for the equivalent linear program to exact \ac{ML} decoding.
$\text{poly}\left( \mathcal{C} \right) $ is further relaxed onto the relaxed codeword polytope
$\overline{Q}$, shown in figure \ref{fig:lp:poly:relaxed}.
Figure \ref{fig:lp:poly:local} shows how $\overline{Q}$ is formed by intersecting the
local codeword polytopes of each check node.
%
%
%
% Codeword polytope visualization figure
%
%
\begin{figure}[H]
\centering
%
% Left side - codeword polytope
%
\begin{subfigure}[b]{0.35\textwidth}
\centering
\begin{subfigure}{\textwidth}
\centering
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{25}
\begin{tikzpicture}[scale=0.9, tdplot_main_coords]
% Cube
\coordinate (p000) at (0, 0, 0);
\coordinate (p001) at (0, 0, 2);
\coordinate (p010) at (0, 2, 0);
\coordinate (p011) at (0, 2, 2);
\coordinate (p100) at (2, 0, 0);
\coordinate (p101) at (2, 0, 2);
\coordinate (p110) at (2, 2, 0);
\coordinate (p111) at (2, 2, 2);
\draw[] (p000) -- (p100);
\draw[] (p100) -- (p101);
\draw[] (p101) -- (p001);
\draw[] (p001) -- (p000);
\draw[dashed] (p010) -- (p110);
\draw[] (p110) -- (p111);
\draw[] (p111) -- (p011);
\draw[dashed] (p011) -- (p010);
\draw[dashed] (p000) -- (p010);
\draw[] (p100) -- (p110);
\draw[] (p101) -- (p111);
\draw[] (p001) -- (p011);
% Polytope Vertices
\node[codeword] (c000) at (p000) {};
\node[codeword] (c011) at (p011) {};
% Polytope Annotations
\node[color=KITblue, below=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c011] {$\left( 0, 1, 1 \right) $};
\end{tikzpicture}
\caption{Set of all codewords $\mathcal{C}$}
\label{fig:lp:poly:exact_ilp}
\end{subfigure}\\[1em]
\begin{subfigure}{\textwidth}
\centering
\begin{tikzpicture}
\node (relaxation) at (0, 0) {Relaxation};
\draw (0, 0.61) -- (relaxation);
\draw[->] (relaxation) -- (0, -0.7);
\end{tikzpicture}
\vspace{4mm}
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{25}
\begin{tikzpicture}[scale=0.9, tdplot_main_coords]
% Cube
\coordinate (p000) at (0, 0, 0);
\coordinate (p001) at (0, 0, 2);
\coordinate (p010) at (0, 2, 0);
\coordinate (p011) at (0, 2, 2);
\coordinate (p100) at (2, 0, 0);
\coordinate (p101) at (2, 0, 2);
\coordinate (p110) at (2, 2, 0);
\coordinate (p111) at (2, 2, 2);
\draw[] (p000) -- (p100);
\draw[] (p100) -- (p101);
\draw[] (p101) -- (p001);
\draw[] (p001) -- (p000);
\draw[dashed] (p010) -- (p110);
\draw[] (p110) -- (p111);
\draw[] (p111) -- (p011);
\draw[dashed] (p011) -- (p010);
\draw[dashed] (p000) -- (p010);
\draw[] (p100) -- (p110);
\draw[] (p101) -- (p111);
\draw[] (p001) -- (p011);
% Polytope Vertices
\node[codeword] (c000) at (p000) {};
\node[codeword] (c011) at (p011) {};
% Polytope Edges
\draw[line width=1pt, color=KITblue] (c000) -- (c011);
% Polytope Annotations
\node[color=KITblue, below=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c011] {$\left( 0, 1, 1 \right) $};
\end{tikzpicture}
\caption{Codeword polytope $\text{poly}\left( \mathcal{C} \right) $}
\label{fig:lp:poly:exact}
\end{subfigure}
\end{subfigure} \hfill%
%
%
% Right side - relaxed polytope
%
%
\begin{subfigure}[b]{0.55\textwidth}
\centering
\begin{subfigure}{\textwidth}
\centering
\begin{minipage}{0.5\textwidth}
\centering
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{25}
\begin{tikzpicture}[scale=0.9, tdplot_main_coords]
% Cube
\coordinate (p000) at (0, 0, 0);
\coordinate (p001) at (0, 0, 2);
\coordinate (p010) at (0, 2, 0);
\coordinate (p011) at (0, 2, 2);
\coordinate (p100) at (2, 0, 0);
\coordinate (p101) at (2, 0, 2);
\coordinate (p110) at (2, 2, 0);
\coordinate (p111) at (2, 2, 2);
\draw[] (p000) -- (p100);
\draw[] (p100) -- (p101);
\draw[] (p101) -- (p001);
\draw[] (p001) -- (p000);
\draw[dashed] (p010) -- (p110);
\draw[] (p110) -- (p111);
\draw[] (p111) -- (p011);
\draw[dashed] (p011) -- (p010);
\draw[dashed] (p000) -- (p010);
\draw[] (p100) -- (p110);
\draw[] (p101) -- (p111);
\draw[] (p001) -- (p011);
% Polytope Vertices
\node[codeword] (c000) at (p000) {};
\node[codeword] (c101) at (p101) {};
\node[codeword] (c110) at (p110) {};
\node[codeword] (c011) at (p011) {};
% Polytope Edges & Faces
\draw[line width=1pt, color=KITblue] (c000) -- (c101);
\draw[line width=1pt, color=KITblue] (c000) -- (c110);
\draw[line width=1pt, color=KITblue] (c000) -- (c011);
\draw[line width=1pt, color=KITblue] (c101) -- (c110);
\draw[line width=1pt, color=KITblue] (c101) -- (c011);
\draw[line width=1pt, color=KITblue] (c011) -- (c110);
\fill[KITblue, opacity=0.15] (p000) -- (p101) -- (p011) -- cycle;
\fill[KITblue, opacity=0.15] (p000) -- (p110) -- (p101) -- cycle;
\fill[KITblue, opacity=0.15] (p110) -- (p011) -- (p101) -- cycle;
% Polytope Annotations
\node[color=KITblue, below=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, right=0.07cm of c101] {$\left( 1, 0, 1 \right) $};
\node[color=KITblue, right=0cm of c110] {$\left( 1, 1, 0 \right) $};
\node[color=KITblue, above=0cm of c011] {$\left( 0, 1, 1 \right) $};
\end{tikzpicture}
\end{minipage}%
\begin{minipage}{0.5\textwidth}
\centering
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{25}
\begin{tikzpicture}[scale=0.9, tdplot_main_coords]
% Cube
\coordinate (p000) at (0, 0, 0);
\coordinate (p001) at (0, 0, 2);
\coordinate (p010) at (0, 2, 0);
\coordinate (p011) at (0, 2, 2);
\coordinate (p100) at (2, 0, 0);
\coordinate (p101) at (2, 0, 2);
\coordinate (p110) at (2, 2, 0);
\coordinate (p111) at (2, 2, 2);
\draw[] (p000) -- (p100);
\draw[] (p100) -- (p101);
\draw[] (p101) -- (p001);
\draw[] (p001) -- (p000);
\draw[dashed] (p010) -- (p110);
\draw[] (p110) -- (p111);
\draw[] (p111) -- (p011);
\draw[dashed] (p011) -- (p010);
\draw[dashed] (p000) -- (p010);
\draw[] (p100) -- (p110);
\draw[] (p101) -- (p111);
\draw[] (p001) -- (p011);
% Polytope Vertices
\node[codeword] (c000) at (p000) {};
\node[codeword] (c011) at (p011) {};
\node[codeword] (c100) at (p100) {};
\node[codeword] (c111) at (p111) {};
% Polytope Edges & Faces
\draw[line width=1pt, color=KITblue] (c000) -- (c011);
\draw[line width=1pt, color=KITblue] (c000) -- (c100);
\draw[line width=1pt, color=KITblue] (c100) -- (c111);
\draw[line width=1pt, color=KITblue] (c111) -- (c011);
\fill[KITblue, opacity=0.2] (p000) -- (p100) -- (p111) -- (p011) -- cycle;
% Polytope Annotations
\node[color=KITblue, below=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c011] {$\left( 0, 1, 1 \right) $};
\node[color=KITblue, below=0cm of c100] {$\left( 1, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c111] {$\left( 1, 1, 1 \right) $};
\end{tikzpicture}
\end{minipage}
\begin{tikzpicture}
\node[color=KITblue, align=center] at (-2,0)
{$j=1$\\ $\left( c_1 + c_2+ c_3 = 0 \right) $};
\node[color=KITblue, align=center] at (2,0)
{$j=2$\\ $\left(c_2 + c_3 = 0\right)$};
\end{tikzpicture}
\caption{Local codeword polytopes of the check nodes}
\label{fig:lp:poly:local}
\end{subfigure}\\[1em]
\begin{subfigure}{\textwidth}
\centering
\begin{tikzpicture}
\draw[densely dashed] (-2, 0) -- (2, 0);
\draw[densely dashed] (-2, 0.5) -- (-2, 0);
\draw[densely dashed] (2, 0.5) -- (2, 0);
\node (intersection) at (0, -0.5) {Intersection};
\draw[densely dashed] (0, 0) -- (intersection);
\draw[densely dashed, ->] (intersection) -- (0, -1);
\end{tikzpicture}
\vspace{2mm}
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tikzstyle{pseudocodeword} = [color=KITred, fill=KITred,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{25}
\begin{tikzpicture}[scale=0.9, tdplot_main_coords]
% Cube
\coordinate (p000) at (0, 0, 0);
\coordinate (p001) at (0, 0, 2);
\coordinate (p010) at (0, 2, 0);
\coordinate (p011) at (0, 2, 2);
\coordinate (p100) at (2, 0, 0);
\coordinate (p101) at (2, 0, 2);
\coordinate (p110) at (2, 2, 0);
\coordinate (p111) at (2, 2, 2);
\draw[] (p000) -- (p100);
\draw[] (p100) -- (p101);
\draw[] (p101) -- (p001);
\draw[] (p001) -- (p000);
\draw[dashed] (p010) -- (p110);
\draw[] (p110) -- (p111);
\draw[] (p111) -- (p011);
\draw[dashed] (p011) -- (p010);
\draw[dashed] (p000) -- (p010);
\draw[] (p100) -- (p110);
\draw[] (p101) -- (p111);
\draw[] (p001) -- (p011);
% Polytope Vertices
\node[codeword] (c000) at (p000) {};
\node[codeword] (c011) at (p011) {};
\node[pseudocodeword] (cpseudo) at (2, 1, 1) {};
% Polytope Edges & Faces
\draw[line width=1pt, color=KITblue] (c000) -- (c011);
\draw[line width=1pt, color=KITred] (cpseudo) -- (c000);
\draw[line width=1pt, color=KITred] (cpseudo) -- (c011);
\fill[KITred, opacity=0.2] (p000) -- (p011) -- (2,1,1) -- cycle;
% Polytope Annotations
\node[color=KITblue, below=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c011] {$\left( 0, 1, 1 \right) $};
\node[color=KITred, right=0cm of cpseudo]
{$\left( 1, \frac{1}{2}, \frac{1}{2} \right) $};
\end{tikzpicture}
\caption{Relaxed codeword polytope $\overline{Q}$}
\label{fig:lp:poly:relaxed}
\end{subfigure}
\end{subfigure}
\vspace*{-2.5cm}
\hspace*{-0.1\textwidth}
\begin{tikzpicture}
\draw[->] (0,0) -- (2.5, 0);
\node[above] at (1.25, 0) {Relaxation};
% Dummy node to make tikzpicture slightly larger
\node[below] at (1.25, 0) {};
\end{tikzpicture}
\vspace{2.5cm}
\caption{Visualization of the codeword polytope and the relaxed codeword
polytope of the code described by equations (\ref{eq:lp:example_code_def_gen})
and (\ref{eq:lp:example_code_def_par})}
\label{fig:lp:poly}
\end{figure}%
%
\noindent It can be seen that the relaxed codeword polytope $\overline{Q}$ introduces
vertices with fractional values;
these represent erroneous non-codeword solutions to the linear program and
correspond to the so-called \textit{pseudo-codewords} introduced in
\cite{feldman_paper}.
However, since for \ac{LDPC} codes the number of constraints describing
$\overline{Q}$ scales linearly with $n$ instead of exponentially, this
formulation is far more tractable for practical applications.
The resulting formulation of the relaxed optimization problem becomes%
%
\begin{align}
\begin{aligned}
\text{minimize }\hspace{2mm} & \boldsymbol{\gamma}^\text{T}\tilde{\boldsymbol{c}} \\
\text{subject to }\hspace{2mm} &\boldsymbol{T}_j \tilde{\boldsymbol{c}} \in \mathcal{P}_{d_j}
\hspace{5mm}\forall j\in\mathcal{J}.
\end{aligned} \label{eq:lp:relaxed_formulation}
\end{align}%
One aspect making \ac{LP} decoding especially appealing is the strong
theoretical guarantee that comes with it, called the
\textit{\ac{ML} certificate property} \cite[Sec. III. B.]{feldman_paper}:
whenever an \ac{LP} decoder produces a valid codeword, that codeword is
guaranteed to be the \ac{ML} codeword.
This leads to an interesting application of \ac{LP} decoding to
approximate \ac{ML} decoding behavior, by successively adding redundant
parity-checks until a valid result is returned \cite[Sec. IV.]{alp}.
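%
The validity check itself is straightforward: the \ac{LP} solution is a
codeword exactly when it is integral and satisfies all parity-checks.
A minimal sketch of such a check (names and tolerance are illustrative):
%
\begin{lstlisting}[language=Python]
import numpy as np

def is_codeword(c_tilde, H, tol=1e-6):
    """True iff the LP solution is (numerically) integral and satisfies
    H c = 0 (mod 2); by the ML certificate property, this implies that
    c_tilde is the ML codeword."""
    c = np.round(c_tilde).astype(int)
    integral = np.all(np.abs(c_tilde - c) < tol)
    return integral and np.all((H @ c) % 2 == 0)
\end{lstlisting}
%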
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Decoding Algorithm and Implementation}%
\label{sec:lp:Decoding Algorithm}
The \ac{LP} decoding formulation in section \ref{sec:lp:LP Decoding}
is very general and can be solved with a number of different optimization methods.
In this work, \ac{ADMM} is examined, as its distributed nature allows for a very
efficient implementation.
\ac{LP} decoding using \ac{ADMM} can be regarded as a message
passing algorithm with separate variable- and check-node update steps;
the resulting algorithm has a striking similarity to \ac{BP} and its computational
complexity has been demonstrated to compare favorably to \ac{BP} \cite{original_admm},
\cite{efficient_lp_dec_admm}.
The \ac{LP} decoding problem in (\ref{eq:lp:relaxed_formulation}) can be
slightly rewritten using the auxiliary variables
$\boldsymbol{z}_1, \ldots, \boldsymbol{z}_m$:%
%
\begin{align}
\begin{aligned}
\begin{array}{r}
\text{minimize }
\end{array}\hspace{0.5mm} & \boldsymbol{\gamma}^\text{T}\tilde{\boldsymbol{c}} \\
\begin{array}{r}
\text{subject to }\\
\phantom{te}
\end{array}\hspace{0.5mm} & \setlength{\arraycolsep}{1.4pt}
\begin{array}{rl}
\boldsymbol{T}_j\tilde{\boldsymbol{c}}
&= \boldsymbol{z}_j\\
\boldsymbol{z}_j
&\in \mathcal{P}_{d_j}
\end{array}
\hspace{5mm} \forall j\in\mathcal{J}.
\end{aligned}
\label{eq:lp:admm_reformulated}
\end{align}
%
In this form, the problem almost fits the \ac{ADMM} template described in section
\ref{sec:theo:Optimization Methods}, except for the fact that there are multiple equality
constraints $\boldsymbol{T}_j \tilde{\boldsymbol{c}} = \boldsymbol{z}_j$ and the
additional constraints $\boldsymbol{z}_j \in \mathcal{P}_{d_j} \, \forall\, j\in\mathcal{J}$.
The multiple constraints can be addressed by introducing additional terms in the
augmented Lagrangian:%
%
\begin{align*}
\mathcal{L}_{\mu}\left( \tilde{\boldsymbol{c}}, \left( \boldsymbol{z}_j \right)_{j=1}^m,
\left( \boldsymbol{\lambda}_j \right)_{j=1}^m \right)
= \boldsymbol{\gamma}^\text{T}\tilde{\boldsymbol{c}}
+ \sum_{j\in\mathcal{J}} \boldsymbol{\lambda}^\text{T}_j
\left( \boldsymbol{T}_j\tilde{\boldsymbol{c}} - \boldsymbol{z}_j \right)
+ \frac{\mu}{2}\sum_{j\in\mathcal{J}}
\lVert \boldsymbol{T}_j\tilde{\boldsymbol{c}} - \boldsymbol{z}_j \rVert^2_2
.\end{align*}%
%
The additional constraints remain in the dual optimization problem:%
%
\begin{align*}
\text{maximize } \min_{\substack{\tilde{\boldsymbol{c}} \\
\boldsymbol{z}_j \in \mathcal{P}_{d_j}\,\forall\,j\in\mathcal{J}}}
\mathcal{L}_{\mu}\left( \tilde{\boldsymbol{c}}, \left( \boldsymbol{z}_j \right)_{j=1}^m,
\left( \boldsymbol{\lambda}_j \right)_{j=1}^m \right)
.\end{align*}%
%
The steps to solve the dual problem then become:
%
\begin{alignat*}{3}
\tilde{\boldsymbol{c}} &\leftarrow \argmin_{\tilde{\boldsymbol{c}}}
\mathcal{L}_{\mu} \left(
\tilde{\boldsymbol{c}}, \left( \boldsymbol{z}_j \right)_{j=1}^m,
\left( \boldsymbol{\lambda}_j\right)_{j=1}^m \right) \\
\boldsymbol{z}_j &\leftarrow \argmin_{\boldsymbol{z}_j \in \mathcal{P}_{d_j}}
\mathcal{L}_{\mu} \left(
\tilde{\boldsymbol{c}}, \left( \boldsymbol{z}_j \right)_{j=1}^m,
\left( \boldsymbol{\lambda}_j \right)_{j=1}^m \right)
\hspace{3mm} &&\forall j\in\mathcal{J} \\
\boldsymbol{\lambda}_j &\leftarrow \boldsymbol{\lambda}_j
+ \mu\left( \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j \right)
\hspace{3mm} &&\forall j\in\mathcal{J}
.\end{alignat*}
%
Conveniently, the additional constraints only affect the $\boldsymbol{z}_j$-update steps.
Furthermore, the $\boldsymbol{z}_j$-update steps can be shown to be equivalent to projections
onto the check polytopes $\mathcal{P}_{d_j}$
and the $\tilde{\boldsymbol{c}}$-update can be computed analytically%
%
\footnote{In the $\tilde{c}_i$-update rule, the term
$\left( \boldsymbol{z}_j \right)_i$ is a slight abuse of notation, as
$\boldsymbol{z}_j$ has fewer components than there are variable nodes $i$.
What is actually meant is the component of $\boldsymbol{z}_j$ that is associated
with the variable node $i$, i.e., $\left( \boldsymbol{T}_j^\text{T}\boldsymbol{z}_j\right)_i$.
The same is true for $\left( \boldsymbol{\lambda}_j \right)_i$.}
%
\cite[Sec. III. B.]{original_admm}:%
%
\begin{alignat*}{3}
\tilde{c}_i &\leftarrow \frac{1}{d_i} \left(
\sum_{j\in N_v\left( i \right) } \Big( \left( \boldsymbol{z}_j \right)_i
- \frac{1}{\mu} \left( \boldsymbol{\lambda}_j \right)_i \Big)
- \frac{\gamma_i}{\mu} \right)
\hspace{3mm} && \forall i\in\mathcal{I} \\
\boldsymbol{z}_j &\leftarrow \Pi_{\mathcal{P}_{d_j}}\left(
\boldsymbol{T}_j\tilde{\boldsymbol{c}} + \frac{\boldsymbol{\lambda}_j}{\mu} \right)
\hspace{3mm} && \forall j\in\mathcal{J} \\
\boldsymbol{\lambda}_j &\leftarrow \boldsymbol{\lambda}_j
+ \mu\left( \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j \right)
\hspace{3mm} && \forall j\in\mathcal{J}
.\end{alignat*}
%
It should be noted that all of the $\boldsymbol{z}_j$-updates can be computed simultaneously,
as they are independent of one another.
The same is true for the updates of the individual components of $\tilde{\boldsymbol{c}}$.
This representation can be slightly simplified by substituting
$\boldsymbol{\lambda}_j = \mu \cdot \boldsymbol{u}_j \,\forall\,j\in\mathcal{J}$:%
%
\begin{alignat}{3}
\tilde{c}_i &\leftarrow \frac{1}{d_i} \left(
\sum_{j\in N_v\left( i \right) } \Big( \left( \boldsymbol{z}_j \right)_i
- \left( \boldsymbol{u}_j \right)_i \Big)
- \frac{\gamma_i}{\mu} \right)
\hspace{3mm} && \forall i\in\mathcal{I} \label{eq:admm:c_update}\\
\boldsymbol{z}_j &\leftarrow \Pi_{\mathcal{P}_{d_j}}\left(
\boldsymbol{T}_j\tilde{\boldsymbol{c}} + \boldsymbol{u}_j \right)
\hspace{3mm} && \forall j\in\mathcal{J} \label{eq:admm:z_update}\\
\boldsymbol{u}_j &\leftarrow \boldsymbol{u}_j
+ \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j
\hspace{3mm} && \forall j\in\mathcal{J} \label{eq:admm:u_update}
.\end{alignat}
%
\ac{ADMM} is able to perform so well because the constraints
$\boldsymbol{T}_j\tilde{\boldsymbol{c}}\in\mathcal{P}_{d_j}\,\forall\, j\in\mathcal{J}$
are relocated into the objective function itself.
The minimization of the new objective function can then take place simultaneously
with respect to all $\boldsymbol{z}_j, j\in\mathcal{J}$.
Effectively, all of the $\left|\mathcal{J}\right|$ parity constraints can be
handled at the same time.
This can also be understood by interpreting the decoding process as a message-passing
algorithm \cite[Sec. III. D.]{original_admm}, \cite[Sec. II. B.]{efficient_lp_dec_admm},
depicted in algorithm \ref{alg:admm}.
\begin{genericAlgorithm}[caption={\ac{LP} decoding using \ac{ADMM} interpreted
as a message passing algorithm\protect\footnotemark{}}, label={alg:admm},
basicstyle=\fontsize{11}{16}\selectfont
]
Initialize $\tilde{\boldsymbol{c}}, \boldsymbol{z}_{[1:m]}$ and $\boldsymbol{u}_{[1:m]}$
while $\sum_{j\in\mathcal{J}} \lVert \boldsymbol{T}_j\tilde{\boldsymbol{c}} - \boldsymbol{z}_j \rVert_2 \ge \epsilon_{\text{pri}}$ or $\sum_{j\in\mathcal{J}} \lVert \boldsymbol{z}^\prime_j - \boldsymbol{z}_j \rVert_2 \ge \epsilon_{\text{dual}}$ do
for $j$ in $\mathcal{J}$ do
$\boldsymbol{z}_j \leftarrow \Pi_{\mathcal{P}_{d_j}}\left(
\boldsymbol{T}_j\tilde{\boldsymbol{c}} + \boldsymbol{u}_j \right)$
$\boldsymbol{u}_j \leftarrow \boldsymbol{u}_j
+ \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j$
end for
for $i$ in $\mathcal{I}$ do
$\tilde{c}_i \leftarrow \frac{1}{d_i} \left(
\sum_{j\in N_v\left( i \right) } \Big(
\left( \boldsymbol{z}_j \right)_i - \left( \boldsymbol{u}_j
\right)_i
\Big) - \frac{\gamma_i}{\mu} \right)$
end for
end while
return $\tilde{\boldsymbol{c}}$
\end{genericAlgorithm}
%
\footnotetext{$\epsilon_{\text{pri}} > 0$ and $\epsilon_{\text{dual}} > 0$
are additional parameters
defining the tolerances for the stopping criteria of the algorithm.
The variable $\boldsymbol{z}_j^\prime$ denotes the value of
$\boldsymbol{z}_j$ in the previous iteration.}%
%
\noindent The $\boldsymbol{z}_j$- and $\boldsymbol{u}_j$-updates can be understood as
a check-node update step (lines 3--6 of algorithm \ref{alg:admm}) and the
$\tilde{c}_i$-updates can be understood as a variable-node update step (lines 7--9).
The updates for each variable and check node can be performed in parallel.
A technique called \textit{over-relaxation} can be employed to further improve
convergence, introducing the over-relaxation parameter $\rho$.
This consists of computing the term
$\rho \boldsymbol{T}_j \tilde{\boldsymbol{c}} + \left( 1 - \rho \right)\boldsymbol{z}_j$
before the $\boldsymbol{z}_j$ and $\boldsymbol{u}_j$ update steps (lines 4 and
5 of algorithm \ref{alg:admm}) and
subsequently replacing $\boldsymbol{T}_j \tilde{\boldsymbol{c}}$ with the
computed value in the two updates \cite[Sec. 3.4.3]{distr_opt_book}.
The main computational effort in solving the linear program amounts to
computing the projection operation $\Pi_{\mathcal{P}_{d_j}} \left( \cdot \right) $
onto each check polytope. Various methods to perform this projection
have been proposed (e.g., in \cite{original_admm}, \cite{efficient_lp_dec_admm},
\cite{lautern});
the method chosen here is the one presented in \cite{original_admm}.
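%
For illustration, the following sketches a check-polytope projection in the
spirit of the cut-search approach (see, e.g., \cite{efficient_lp_dec_admm});
it is not the method of \cite{original_admm} used in this implementation.
The argument is first projected onto the unit hypercube; if the nearest
hypercube vertex has odd parity, the result is projected onto the single
potentially violated facet of $\mathcal{P}_{d_j}$, which (after flipping
coordinates) reduces to a box-constrained hyperplane projection solvable by
bisection:
%
\begin{lstlisting}[language=Python]
import numpy as np

def project_parity_polytope(v, tol=1e-9):
    """Sketch: Euclidean projection of v onto the check polytope P_d."""
    d = len(v)
    x = np.clip(v, 0.0, 1.0)            # projection onto the hypercube
    z = x > 0.5                         # nearest hypercube vertex
    if z.sum() % 2 == 0:
        return x                        # even vertex: box projection suffices
    # Only the facet defined by S = {i : z_i = 1} can be violated:
    #   sum_{i in S} x_i - sum_{i not in S} x_i <= |S| - 1.
    f = np.where(z, 1.0, -1.0)
    if f @ x <= z.sum() - 1 + tol:
        return x                        # facet not violated
    # Flip coordinates outside S; the facet becomes sum(x) <= d - 1 and the
    # projection is clip(v - tau, 0, 1) with tau >= 0 found by bisection.
    v_t = np.where(z, v, 1.0 - v)
    lo, hi = 0.0, float(np.max(v_t))    # sum is non-increasing in tau
    for _ in range(100):
        tau = 0.5 * (lo + hi)
        if np.clip(v_t - tau, 0.0, 1.0).sum() > d - 1:
            lo = tau
        else:
            hi = tau
    x_t = np.clip(v_t - hi, 0.0, 1.0)
    return np.where(z, x_t, 1.0 - x_t)  # undo the flip
\end{lstlisting}
%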
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\section{Implementation Details}%
%\label{sec:lp:Implementation Details}
The development process used to implement this decoding algorithm was the same
as outlined in section
\ref{sec:prox:Decoding Algorithm} for proximal decoding.
First, an initial version was implemented in Python; the process was then
repeated in C++ to achieve higher performance.
Again, the performance can be increased by reframing the operations in such
a way that the computation can take place primarily with element-wise
operations and matrix-vector multiplication, since these operations
are highly optimized in the software libraries used for the implementation.
In the summation operation in line 8 of algorithm \ref{alg:admm}, the
components of each $\boldsymbol{z}_j$ and $\boldsymbol{u}_j$ relating to a
given \ac{VN} $i$ have to be found.
This operation can be streamlined by observing that the transfer matrices
$\boldsymbol{T}_j,\hspace{1mm}j\in\mathcal{J}$ are able to perform the mapping
they were devised for in both directions:
with $\boldsymbol{T}_j \tilde{\boldsymbol{c}}$, the $d_j$ components of
$\tilde{\boldsymbol{c}}$ required for parity-check $j$ are selected;
with $\boldsymbol{T}_j^\text{T} \boldsymbol{z}_j$, the $d_j$ components of
$\boldsymbol{z}_j$ can be mapped onto a vector of length $n$, each component
at the position corresponding to the \ac{VN} it relates to.
Using this observation, the sum can be written as%
%
\begin{align*}
\sum_{j\in N_v\left( i \right) }\left( \boldsymbol{T}_j^\text{T} \left( \boldsymbol{z}_j
- \boldsymbol{u}_j \right) \right)_i
.\end{align*}
Further noticing that for check nodes $j$ not adjacent to \ac{VN} $i$, the
vectors $\boldsymbol{T}_j^\text{T}\left( \boldsymbol{z}_j - \boldsymbol{u}_j \right)$
have $0$ as the $i$th component, the set of indices
the summation takes place over can be extended to $\mathcal{J}$, allowing the
expression to be rewritten as%
%
\begin{align*}
\sum_{j\in \mathcal{J}}\left( \boldsymbol{T}_j^\text{T} \left( \boldsymbol{z}_j
- \boldsymbol{u}_j \right) \right)_i
= \left( \sum_{j\in\mathcal{J}} \boldsymbol{T}_j^\text{T}
\left( \boldsymbol{z}_j - \boldsymbol{u}_j \right) \right)_i
.\end{align*}
%
Defining%
\footnote{
In this case $d_1, \ldots, d_n$ refer to the degrees of the variable nodes,
i.e., $d_i,\hspace{1mm}i\in\mathcal{I}$.
}
%
\begin{align*}
\boldsymbol{d} := \begin{bmatrix}
d_1 \\
\vdots \\
d_n
\end{bmatrix}%
\hspace{5mm}%
\text{and}%
\hspace{5mm}%
\boldsymbol{s} := \sum_{j\in\mathcal{J}} \boldsymbol{T}_j^\text{T}
\left( \boldsymbol{z}_j - \boldsymbol{u}_j \right)
,\end{align*}%
%
the $\tilde{\boldsymbol{c}}$-update can then be rewritten as%
%
\begin{align*}
\tilde{\boldsymbol{c}} \leftarrow \boldsymbol{d}^{\circ \left(-1\right)} \circ
\left( \boldsymbol{s} - \frac{1}{\mu}\boldsymbol{\gamma} \right)
.\end{align*}
%
This modified version of the decoding process is depicted in algorithm \ref{alg:admm:mod}.
\begin{genericAlgorithm}[caption={\ac{LP} decoding using \ac{ADMM} with rewritten
update steps}, label={alg:admm:mod},
basicstyle=\fontsize{11}{16}\selectfont
]
Initialize $\tilde{\boldsymbol{c}}, \boldsymbol{z}_{[1:m]}$ and $\boldsymbol{u}_{[1:m]}$
while $\sum_{j\in\mathcal{J}} \lVert \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j \rVert_2 \ge \epsilon_{\text{pri}}$ or $\sum_{j\in\mathcal{J}}
\lVert \boldsymbol{z}^\prime_j - \boldsymbol{z}_j \rVert_2 \ge \epsilon_{\text{dual}}$ do
$\boldsymbol{s} \leftarrow \boldsymbol{0}$
for $j$ in $\mathcal{J}$ do
$\boldsymbol{z}_j \leftarrow \Pi_{\mathcal{P}_{d_j}}\left(
\boldsymbol{T}_j\tilde{\boldsymbol{c}} + \boldsymbol{u}_j \right)$
$\boldsymbol{u}_j \leftarrow \boldsymbol{u}_j
+ \boldsymbol{T}_j\tilde{\boldsymbol{c}}
- \boldsymbol{z}_j$
$\boldsymbol{s} \leftarrow \boldsymbol{s} + \boldsymbol{T}_j^\text{T}
\left( \boldsymbol{z}_j - \boldsymbol{u}_j \right) $
end for
$\tilde{\boldsymbol{c}} \leftarrow \boldsymbol{d}^{\circ \left( -1\right)} \circ
\left( \boldsymbol{s} - \frac{1}{\mu}\boldsymbol{\gamma} \right) $
end while
return $\tilde{\boldsymbol{c}}$
\end{genericAlgorithm}
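%
To summarize, the following Python sketch mirrors algorithm
\ref{alg:admm:mod}, in the spirit of the initial Python prototype (names, the
dense matrix representation and the fixed iteration cap are illustrative;
\texttt{project\_parity\_polytope} denotes a check-polytope projection such as
the one sketched above, and over-relaxation is included for $\rho \neq 1$):
%
\begin{lstlisting}[language=Python]
import numpy as np

def admm_decode(T_list, gamma, d, mu=5.0, rho=1.0,
                eps_pri=1e-5, eps_dual=1e-5, max_iter=200):
    """Sketch of algorithm alg:admm:mod.
    T_list: transfer matrices T_j; gamma: LLRs; d: variable-node degrees."""
    n = len(gamma)
    c = np.full(n, 0.5)
    z = [np.full(T.shape[0], 0.5) for T in T_list]
    u = [np.zeros(T.shape[0]) for T in T_list]
    for _ in range(max_iter):
        s = np.zeros(n)
        r_dual = 0.0
        for j, T in enumerate(T_list):
            t = rho * (T @ c) + (1.0 - rho) * z[j]  # over-relaxation
            z_old = z[j]
            z[j] = project_parity_polytope(t + u[j])
            u[j] = u[j] + t - z[j]
            s += T.T @ (z[j] - u[j])
            r_dual += np.linalg.norm(z[j] - z_old)
        c = (s - gamma / mu) / d                    # element-wise VN update
        r_pri = sum(np.linalg.norm(T @ c - z[j])
                    for j, T in enumerate(T_list))
        if r_pri < eps_pri and r_dual < eps_dual:
            break
    return c
\end{lstlisting}
%
A hard decision can then be obtained by rounding the returned
$\tilde{\boldsymbol{c}}$ and verifying it against $\boldsymbol{H}$.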
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Analysis and Simulation Results}%
\label{sec:lp:Analysis and Simulation Results}
In this section, \ac{LP} decoding using \ac{ADMM} is examined based on
simulation results for various codes.
First, the effect of the different parameters and how their values should be
chosen is investigated.
Subsequently, the decoding performance is observed and compared to that of
\ac{BP}.
Finally, the computational performance of the implementation and time
complexity of the algorithm are studied.
As was the case in chapter \ref{chapter:proximal_decoding} for proximal decoding,
the following simulation results are based on Monte Carlo simulations,
and the BER and FER curves have been generated by producing at least 100
frame errors for each data point, except where explicitly stated otherwise.
\subsection{Choice of Parameters}
The first two parameters to be investigated are the penalty parameter $\mu$
and the over-relaxation parameter $\rho$.
A first indication of suitable values for these parameters can be obtained
by examining how the decoding performance depends on them.
The \ac{FER} is plotted as a function of $\mu$ and $\rho$ in figure
\ref{fig:admm:mu_rho}, for three different \acp{SNR}.
The code chosen for this examination is a (3,6) regular \ac{LDPC} code with
$n=204$ and $k=102$ \cite[\text{204.33.484}]{mackay_enc}.
When varying $\mu$, $\rho$ is set to $1$; when varying
$\rho$, $\mu$ is set to $5$.
The maximum number of iterations $K$ is set to 200 and
$\epsilon_\text{dual}$ and $\epsilon_\text{pri}$ to $10^{-5}$.
The behavior that can be observed is very similar to that of the
parameter $\gamma$ in proximal decoding, analyzed in section
\ref{sec:prox:Analysis and Simulation Results}.
A single value giving optimal performance does not exist; rather,
as long as the value is chosen within a certain range, the performance is
approximately the same.
\begin{figure}[H]
\centering
\begin{subfigure}[c]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\mu$}, ylabel={\acs{FER}},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=mu, y=FER,
discard if not={SNR}{2.0},]
{res/admm/ber_2d_20433484.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=mu, y=FER,
discard if not={SNR}{3.0},]
{res/admm/ber_2d_20433484.csv};
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=mu, y=FER,
discard if not={SNR}{4.0},]
{res/admm/ber_2d_20433484.csv};
\end{axis}
\end{tikzpicture}
\end{subfigure}%
\hfill%
\begin{subfigure}[c]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={\acs{FER}},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=FER,
discard if not={SNR}{2.0},]
{res/admm/ber_2d_20433484_rho.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=FER,
discard if not={SNR}{3.0},]
{res/admm/ber_2d_20433484_rho.csv};
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=FER,
discard if not={SNR}{4.0},]
{res/admm/ber_2d_20433484_rho.csv};
\end{axis}
\end{tikzpicture}
\end{subfigure}%
\begin{subfigure}[t]{\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[hide axis,
xmin=10, xmax=50,
ymin=0, ymax=0.4,
legend columns=3,
legend style={draw=white!15!black,legend cell align=left}]
\addlegendimage{ForestGreen, line width=1pt, densely dashed, mark=*}
\addlegendentry{$E_b / N_0 = \SI{2}{dB}$}
\addlegendimage{RedOrange, line width=1pt, densely dashed, mark=*}
\addlegendentry{$E_b / N_0 = \SI{3}{dB}$}
\addlegendimage{NavyBlue, line width=1pt, densely dashed, mark=*}
\addlegendentry{$E_b / N_0 = \SI{4}{dB}$}
\end{axis}
\end{tikzpicture}
\end{subfigure}
\caption{Dependence of the decoding performance on the parameters $\mu$ and $\rho$.
(3,6) regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
\label{fig:admm:mu_rho}
\end{figure}%
To aid in the choice of the parameters, an additional criterion can be used:
the number of iterations performed for a decoding operation.
This is directly related to the time needed to decode a received vector
$\boldsymbol{y}$, which is to be minimized.
Figure \ref{fig:admm:mu_rho_iterations} shows the average number of iterations
over $\SI{1000}{}$ decodings, as a function of $\rho$.
This time the \ac{SNR} is kept constant at $\SI{4}{dB}$ and the parameter
$\mu$ is varied.
The values chosen for the rest of the parameters are the same as before.
It can be seen that choosing a large value for $\rho$ as well as a small value
for $\mu$ minimizes the average number of iterations and thus the average
run time of the decoding process.
The same behavior can be observed when looking at various%
%
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=0.6\textwidth,
height=0.45\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\addlegendentry{$\mu = 9$}
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\addlegendentry{$\mu = 5$}
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\addlegendentry{$\mu = 2$}
\end{axis}
\end{tikzpicture}
\caption{Dependence of the average number of iterations required on $\mu$ and $\rho$
for $E_b / N_0 = \SI{4}{dB}$. (3,6) regular \ac{LDPC} code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
\label{fig:admm:mu_rho_iterations}
\end{figure}%
%
\noindent different codes, as shown in figure \ref{fig:admm:mu_rho_multiple}.
To get an estimate of the maximum number of iterations $K$ necessary,
the average error during decoding can be used.
This is shown in figure \ref{fig:admm:avg_error} as an average over
$\SI{100000}{}$ decodings.
$\mu$ is set to $5$, $\rho$ to $1$, and the rest of the parameters are
again chosen as $\epsilon_\text{pri}=10^{-5}$ and
$\epsilon_\text{dual}=10^{-5}$.
Similarly to the results in section \ref{subsec:prox:choice}, a dip is
visible around the 20-iteration mark.
This is because, as the number of iterations increases, more and more
decodings converge, leaving only the erroneous ones to be averaged.
The point at which the erroneous decodings become dominant and the
decoding performance no longer improves is largely independent of
the \ac{SNR}, allowing the maximum number of iterations to be chosen without
considering the \ac{SNR}.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
width=0.6\textwidth,
height=0.45\textwidth,
xlabel={Iteration}, ylabel={Average $\left\Vert \hat{\boldsymbol{c}}
- \boldsymbol{c} \right\Vert$}
]
\addplot[ForestGreen, line width=1pt]
table [col sep=comma, x=k, y=err,
discard if not={SNR}{1.0},
discard if gt={k}{100}]
{res/admm/avg_error_20433484.csv};
\addlegendentry{$E_b / N_0 = \SI{1}{dB}$}
\addplot[RedOrange, line width=1pt]
table [col sep=comma, x=k, y=err,
discard if not={SNR}{2.0},
discard if gt={k}{100}]
{res/admm/avg_error_20433484.csv};
\addlegendentry{$E_b / N_0 = \SI{2}{dB}$}
\addplot[NavyBlue, line width=1pt]
table [col sep=comma, x=k, y=err,
discard if not={SNR}{3.0},
discard if gt={k}{100}]
{res/admm/avg_error_20433484.csv};
\addlegendentry{$E_b / N_0 = \SI{3}{dB}$}
\addplot[RoyalPurple, line width=1pt]
table [col sep=comma, x=k, y=err,
discard if not={SNR}{4.0},
discard if gt={k}{100}]
{res/admm/avg_error_20433484.csv};
\addlegendentry{$E_b / N_0 = \SI{4}{dB}$}
\end{axis}
\end{tikzpicture}
\caption{Average error for $\SI{100000}{}$ decodings. (3,6)
regular \ac{LDPC} code with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
\label{fig:admm:avg_error}
\end{figure}%
The last two parameters remaining to be examined are the tolerances for the
stopping criterion of the algorithm, $\epsilon_\text{pri}$ and
$\epsilon_\text{dual}$.
These are both set to the same value $\epsilon$.
The effect of this value on the decoding performance is visualized in figure
\ref{fig:admm:epsilon}.
All parameters except $\epsilon_\text{pri}$ and $\epsilon_\text{dual}$ are
kept constant, with $\mu=5$, $\rho=1$, $E_b / N_0 = \SI{4}{dB}$ and
a maximum of 200 iterations.
A lower value for the tolerance initially leads to a dramatic decrease in the
\ac{FER}; this effect fades as the tolerance is decreased further.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\epsilon$}, ylabel={\acs{FER}},
ymode=log,
xmode=log,
x dir=reverse,
width=0.6\textwidth,
height=0.45\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=epsilon, y=FER,
discard if not={SNR}{3.0},]
{res/admm/fer_epsilon_20433484.csv};
\end{axis}
\end{tikzpicture}
\caption{Effect of the value of the parameters $\epsilon_\text{pri}$ and
$\epsilon_\text{dual}$ on the \acs{FER}. (3,6) regular \ac{LDPC} code with
$n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc}}
\label{fig:admm:epsilon}
\end{figure}%
In conclusion, the parameters $\mu$ and $\rho$ should be chosen comparatively
small and large, respectively, to reduce the average runtime of the decoding
process, while keeping them within a certain range so as not to compromise the
decoding performance.
The maximum number of iterations performed can be chosen independently
of the \ac{SNR}.
Finally, small values should be given to the parameters
$\epsilon_{\text{pri}}$ and $\epsilon_{\text{dual}}$ to achieve the lowest
possible error rate.
\subsection{Decoding Performance}
In figure \ref{fig:admm:results}, the simulation results for the ``Margulis''
\ac{LDPC} code ($n=2640$, $k=1320$) presented by Barman et al. in
\cite{original_admm} are compared to the results from the simulations
conducted in the context of this thesis.
The parameters chosen were $\mu=3.3$, $\rho=1.9$, $K=1000$,
$\epsilon_\text{pri}=10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$,
the same as in \cite{original_admm}.
The two \ac{FER} curves are practically identical.
Also shown is the \ac{FER} curve resulting from \ac{BP} decoding with
1000 iterations.
The two algorithms perform relatively similarly, staying within $\SI{0.5}{dB}$
of one another.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0 \left( \text{dB} \right) $}, ylabel={\acs{FER}},
ymode=log,
width=0.6\textwidth,
height=0.45\textwidth,
legend style={at={(0.5,-0.57)},anchor=south},
legend cell align={left},
]
\addplot[Turquoise, line width=1pt, mark=*]
table [col sep=comma, x=SNR, y=FER,
discard if gt={SNR}{2.2},
]
{res/admm/fer_paper_margulis.csv};
\addlegendentry{\acs{ADMM} (Barman et al.)}
\addplot[NavyBlue, densely dashed, line width=1pt, mark=triangle]
table [col sep=comma, x=SNR, y=FER,]
{res/admm/ber_margulis264013203.csv};
\addlegendentry{\acs{ADMM} (Own results)}
\addplot[RoyalPurple, line width=1pt, mark=*]
table [col sep=comma, x=SNR, y=FER, discard if gt={SNR}{2.2},]
{res/generic/fer_bp_mackay_margulis.csv};
\addlegendentry{\acs{BP} (Barman et al.)}
\end{axis}
\end{tikzpicture}
\caption{Comparison of datapoints from Barman et al. with own simulation results.
``Margulis'' \ac{LDPC} code with $n = 2640$, $k = 1320$
\cite[\text{Margulis2640.1320.3}]{mackay_enc}}
\label{fig:admm:results}
\end{figure}%
%
In figure \ref{fig:admm:bp_multiple}, \ac{FER} curves for \ac{LP} decoding
using \ac{ADMM} and \ac{BP} are shown for various codes.
To ensure comparability, in all cases the number of iterations was set to
$K=200$.
The values of the other parameters were chosen as $\mu = 5$, $\rho = 1$,
$\epsilon_\text{pri} = 10^{-5}$ and $\epsilon_\text{dual}=10^{-5}$.
Comparing the simulation results for the different codes, it is apparent that
the difference in decoding performance depends on the code being
considered.
For all codes considered here, however, the performance of \ac{LP} decoding
using \ac{ADMM} comes close to that of \ac{BP}, again staying within
approximately $\SI{0.5}{dB}$.
\subsection{Computational Performance}
\label{subsec:admm:comp_perf}
In terms of time complexity, the three steps of the decoding algorithm
in equations (\ref{eq:admm:c_update})--(\ref{eq:admm:u_update}) have to be
considered.
The $\tilde{\boldsymbol{c}}$- and $\boldsymbol{u}_j$-update steps are
$\mathcal{O}\left( n \right)$ \cite[Sec. III. C.]{original_admm}.
The complexity of the $\boldsymbol{z}_j$-update step depends on the projection
algorithm employed.
Since for the implementation completed for this work the projection algorithm
presented in \cite{original_admm} is used, the $\boldsymbol{z}_j$-update step
also has linear time complexity.
\begin{figure}[H]
\centering
\begin{tikzpicture}
\begin{axis}[grid=both,
xlabel={$n$}, ylabel={Time per frame (s)},
width=0.6\textwidth,
height=0.45\textwidth,
legend style={at={(0.5,-0.42)},anchor=south},
legend cell align={left},]
\addplot[NavyBlue, only marks, mark=triangle*]
table [col sep=comma, x=n, y=spf]
{res/admm/fps_vs_n.csv};
\end{axis}
\end{tikzpicture}
\caption{Timing requirements of the \ac{LP} decoding using \ac{ADMM} implementation}
\label{fig:admm:time}
\end{figure}%
Simulation results from a range of different codes can be used to verify this
analysis.
Figure \ref{fig:admm:time} shows the average time needed to decode one
frame as a function of its length.
The codes used for this consideration are the same as in section
\ref{subsec:prox:comp_perf}.
The results are necessarily skewed because these codes vary not only
in their length, but also in their construction scheme and rate.
Additionally, different optimization opportunities arise depending on the
length of a code, since for smaller codes dynamic memory allocation can be
completely omitted.
This may explain why the datapoint at $n=504$ is higher than would be expected
with linear behavior.
Nonetheless, the simulation results roughly match the expected behavior
following from the theoretical considerations.
\begin{figure}[H]
\centering
\vspace*{5cm}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_963965.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_963965.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_963965.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=96, k=48$
\cite[\text{96.3.965}]{mackay_enc}}
\end{subfigure}%
\hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_bch_31_26.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_bch_31_26.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_bch_31_26.csv};
\end{axis}
\end{tikzpicture}
\caption{BCH code with $n=31, k=26$\\[2\baselineskip]}
\end{subfigure}
\vspace{3mm}
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_20433484.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
\end{subfigure}%
\hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_20455187.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_20455187.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_20455187.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 5, 10 \right)$-regular \ac{LDPC} code with $n=204, k=102$
\cite[\text{204.55.187}]{mackay_enc}}
\end{subfigure}%
\vspace{3mm}
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_40833844.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_40833844.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_40833844.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=408, k=204$
\cite[\text{408.33.844}]{mackay_enc}}
\end{subfigure}%
\hfill
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$\rho$}, ylabel={Average \# of iterations},
ymode=log,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[NavyBlue, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{9.0},]
{res/admm/mu_rho_kavg_pegreg252x504.csv};
\addplot[RedOrange, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{5.0},]
{res/admm/mu_rho_kavg_pegreg252x504.csv};
\addplot[ForestGreen, line width=1pt, densely dashed, mark=*]
table [col sep=comma, x=rho, y=k_avg,
discard if not={mu}{2.0},]
{res/admm/mu_rho_kavg_pegreg252x504.csv};
\end{axis}
\end{tikzpicture}
\caption{LDPC code (progressive edge growth construction) with $n=504, k=252$
\cite[\text{PEGReg252x504}]{mackay_enc}}
\end{subfigure}%
\vspace{5mm}
\begin{subfigure}[t]{\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[hide axis,
xmin=10, xmax=50,
ymin=0, ymax=0.4,
legend style={draw=white!15!black,legend cell align=left}]
\addlegendimage{NavyBlue, line width=1.5pt, densely dashed, mark=*}
\addlegendentry{$\mu = 9$};
\addlegendimage{RedOrange, line width=1.5pt, densely dashed, mark=*}
\addlegendentry{$\mu = 5$};
\addlegendimage{ForestGreen, line width=1.5pt, densely dashed, mark=*}
\addlegendentry{$\mu = 2$};
\end{axis}
\end{tikzpicture}
\end{subfigure}
\caption{Dependence of the average number of iterations required on the parameters
$\mu$ and $\rho$ for $E_b / N_0 = \SI{4}{dB}$ for various codes}
\label{fig:admm:mu_rho_multiple}
\end{figure}
\vfill
\newpage
\begin{figure}[H]
\centering
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma, discard if not={mu}{3.0}]
%{res/hybrid/2d_ber_fer_dfr_963965.csv};
{res/admm/ber_2d_963965.csv};
\addplot [RoyalPurple, mark=*, line width=1pt]
table [x=SNR, y=FER, col sep=comma]
{res/generic/bp_963965.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=96, k=48$
\cite[\text{96.3.965}]{mackay_enc}}
\end{subfigure}%
\hfill%
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma, discard if not={mu}{3.0}]
{res/admm/ber_2d_bch_31_26.csv};
\addplot [RoyalPurple, mark=*, line width=1pt]
table [x=SNR, y=FER, col sep=comma]
{res/generic/bp_bch_31_26.csv};
\end{axis}
\end{tikzpicture}
\caption{BCH code with $n=31, k=26$}
\end{subfigure}%
\vspace{3mm}
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma,
discard if not={mu}{3.0},
discard if gt={SNR}{5.5}]
{res/admm/ber_2d_20433484.csv};
\addplot [RoyalPurple, mark=*, line width=1pt]
table [x=SNR, y=FER, col sep=comma]
{res/generic/bp_20433484.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
\end{subfigure}%
\hfill%
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma, discard if not={mu}{3.0}]
{res/admm/ber_2d_20455187.csv};
\addplot [RoyalPurple, mark=*, line width=1pt,
discard if gt={SNR}{5}]
table [x=SNR, y=FER, col sep=comma]
{res/generic/bp_20455187.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 5, 10 \right)$-regular \ac{LDPC} code with $n=204, k=102$
\cite[\text{204.55.187}]{mackay_enc}}
\end{subfigure}%
\vspace{3mm}
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma, discard if not={mu}{3.0}]
{res/admm/ber_2d_40833844.csv};
\addplot [RoyalPurple, mark=*, line width=1pt,
discard if gt={SNR}{3}]
table [x=SNR, y=FER, col sep=comma]
{res/generic/bp_40833844.csv};
\end{axis}
\end{tikzpicture}
\caption{$\left( 3, 6 \right)$-regular \ac{LDPC} code with $n=408, k=204$
\cite[\text{408.33.844}]{mackay_enc}}
\end{subfigure}%
\hfill%
\begin{subfigure}[t]{0.48\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
grid=both,
xlabel={$E_b / N_0$ (dB)}, ylabel={FER},
ymode=log,
ymax=1.5, ymin=8e-5,
width=\textwidth,
height=0.75\textwidth,
]
\addplot[Turquoise, line width=1pt, mark=*]
table [x=SNR, y=FER, col sep=comma, discard if not={mu}{3.0}]
{res/admm/ber_2d_pegreg252x504.csv};
\addplot [RoyalPurple, mark=*, line width=1pt]
table [x=SNR, y=FER, col sep=comma,
discard if gt={SNR}{3}]
{res/generic/bp_pegreg252x504.csv};
\end{axis}
\end{tikzpicture}
\caption{LDPC code (progressive edge growth construction) with $n=504, k=252$
\cite[\text{PEGReg252x504}]{mackay_enc}}
\end{subfigure}%
\vspace{5mm}
\begin{subfigure}[t]{\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[hide axis,
xmin=10, xmax=50,
ymin=0, ymax=0.4,
legend columns=1,
legend cell align={left},
legend style={draw=white!15!black}]
\addlegendimage{Turquoise, line width=1pt, mark=*}
\addlegendentry{\acs{LP} decoding using \acs{ADMM}}
\addlegendimage{RoyalPurple, line width=1pt, mark=*, solid}
\addlegendentry{\acs{BP} (200 iterations)}
\end{axis}
\end{tikzpicture}
\end{subfigure}
\caption{Comparison of the decoding performance of \ac{LP} decoding using \ac{ADMM}
and \ac{BP} for various codes}
\label{fig:admm:bp_multiple}
\end{figure}