Second round of changes I

Andreas Tsouchlos 2023-01-26 23:59:35 +01:00
parent 331378fc90
commit 0455d16fce
5 changed files with 88 additions and 74 deletions


@@ -126,7 +126,7 @@
\title{Application of Optimization Algorithms for Channel Decoding}
\subtitle{\small Midterm Presentation - 27.01.2023}
\subtitle{\small Midterm Presentation, 27.01.2023}
%\author{Andreas Tsouchlos}
\author{\vspace{1.5mm} Andreas Tsouchlos}


@@ -13,10 +13,10 @@
\item MAP rule:
\begin{align*}
\hat{\boldsymbol{x}}
= \argmax_{x\in\mathbb{R}}
= \argmax_{x\in\mathbb{R}^n}
f_{\boldsymbol{Y}}\left( \boldsymbol{y} | \boldsymbol{x} \right)
f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
= \argmax_{x\in\mathbb{R}}
= \argmax_{x\in\mathbb{R}^n}
e^{-L\left( \boldsymbol{y} | \boldsymbol{x}\right)}
f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
\end{align*}
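
Taking the negative logarithm turns the corrected MAP rule into a minimization, which is the form the later slides build on. An illustrative step, not from the commit, using only quantities defined above:

\begin{align*}
\hat{\boldsymbol{x}}
= \argmax_{\boldsymbol{x}\in\mathbb{R}^n}
e^{-L\left( \boldsymbol{y} | \boldsymbol{x} \right)}
f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
= \argmin_{\boldsymbol{x}\in\mathbb{R}^n}
\left[ L\left( \boldsymbol{y} | \boldsymbol{x} \right)
- \ln f_{\boldsymbol{X}}\left( \boldsymbol{x} \right) \right]
\end{align*}

With the prior approximation introduced in the next hunk, the second term becomes $\gamma h\left( \boldsymbol{x} \right)$ up to a constant.
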
@@ -24,23 +24,34 @@
\begin{align*}
f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
= \frac{1}{\left| \mathcal{C}\left( \boldsymbol{H} \right) \right| }
\sum_{c \in \mathcal{C}\left( \boldsymbol{H} \right) }
\sum_{\boldsymbol{c} \in \mathcal{C}\left( \boldsymbol{H} \right) }
\delta\left( \boldsymbol{x} - \left( -1 \right)^{\boldsymbol{c}} \right)
\approx \frac{1}{Z} e^{-\gamma h\left( x \right) }
\approx \frac{1}{Z} e^{-\gamma h\left( \boldsymbol{x} \right) }
\end{align*}
\item Code constraint polynomial:
\begin{align*}
h\left( \boldsymbol{x} \right) =
\underbrace{\sum_{j=1}^{n} \left( x_j^2 - 1 \right)^2}_{\text{Bipolar constraint}}
+ \underbrace{\sum_{i=1}^{m} \left[ \left(
\prod_{j\in\mathcal{A}\left( i \right)} x_j\right) -1 \right]^2}
_{\text{Parity constraint}},
\hspace{5mm}\mathcal{A}\left( i \right) \equiv \left\{
j | j\in \mathcal{J},
\boldsymbol{H}_{j,i} = 1
\right\},
i \in \mathcal{I}
\end{align*}
\begin{minipage}[c]{0.56\textwidth}
\raggedright
\begin{align*}
h\left( \boldsymbol{x} \right) =
\underbrace{\sum_{j=1}^{n} \left( x_j^2 - 1 \right)^2}_{\text{Bipolar
constraint}}
+ \underbrace{\sum_{i=1}^{m} \left[ \left(
\prod_{j\in\mathcal{A}\left( i \right)} x_j\right) -1 \right]^2}
_{\text{Parity constraint}},
\end{align*}
\end{minipage}%
\begin{minipage}[c]{0.4\textwidth}
\raggedleft
\begin{flalign*}
\mathcal{I} &\equiv \left\{\text{``Set of all check nodes''}\right\} &\\
\mathcal{J} &\equiv \left\{\text{``Set of all variable nodes''}\right\} &\\
\mathcal{A}\left( i \right) &\equiv \left\{j | j\in \mathcal{J},
\boldsymbol{H}_{i,j} = 1
\right\}, i \in \mathcal{I}&
\end{flalign*}
\end{minipage}
\hfill
\end{itemize}
\end{frame}
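
Since $h(\boldsymbol{x})$ drives everything that follows, a small numeric sketch of it and its gradient may help. This is illustrative code, not from the repository; `checks` (a list holding, for each check node i, the index set A(i) of participating variable nodes) and all function names are hypothetical:

import numpy as np

# Illustrative sketch of the code-constraint polynomial h(x) from the slide
# and its gradient: bipolar term + parity term.
# e.g. checks = [[0, 1, 2], [1, 3, 4], ...]

def h(x, checks):
    bipolar = np.sum((x**2 - 1.0) ** 2)
    parity = sum((np.prod(x[A]) - 1.0) ** 2 for A in checks)
    return bipolar + parity

def grad_h(x, checks):
    g = 4.0 * x * (x**2 - 1.0)  # derivative of the bipolar term
    for A in checks:
        xa = x[A]
        p = np.prod(xa)
        for pos, j in enumerate(A):
            # derivative of (prod_{j in A(i)} x_j - 1)^2 w.r.t. x_j
            g[j] += 2.0 * (p - 1.0) * np.prod(np.delete(xa, pos))
    return g
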
@@ -63,8 +74,8 @@
\item Code proximal operator \cite{proximal_algorithms}:
\begin{align*}
\text{prox}_{\gamma h} \left( \boldsymbol{x} \right) &\equiv
\argmin_{\boldsymbol{z}\in\mathbb{R}} \left(
\gamma h\left( \boldsymbol{z} \right) + \frac{1}{2} \lVert \boldsymbol{z}
\argmin_{\boldsymbol{t}\in\mathbb{R}^n} \left(
\gamma h\left( \boldsymbol{t} \right) + \frac{1}{2} \lVert \boldsymbol{t}
- \boldsymbol{x} \rVert^2 \right)\\
&\approx \boldsymbol{x} - \gamma \nabla h\left( \boldsymbol{x} \right),
\hspace{5mm} \gamma \text{ small}
@@ -77,7 +88,8 @@
\hspace{10mm} \text{``Gradient descent step''}\\
\boldsymbol{s} &\leftarrow \boldsymbol{r}
- \gamma \nabla h\left( \boldsymbol{r}
\right) \hspace{29mm} \text{``Code proximal step''}
\right), \hspace{9mm} \gamma > 0
\hspace{10mm} \text{``Code proximal step''}
\end{align*}
\end{itemize}
\end{frame}
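
The two update rules above are the whole iteration. A minimal sketch, assuming the AWGN/BPSK setup used throughout (so the gradient of $L(\boldsymbol{y}|\boldsymbol{s})$ is proportional to $\boldsymbol{s}-\boldsymbol{y}$, with the noise variance absorbed into $\omega$) and reusing grad_h from the sketch above; the defaults mirror the footnote values used later ($\gamma = 0.05$, $\omega = 0.05$, $K = 200$):

import numpy as np

def proximal_decode(y, checks, gamma=0.05, omega=0.05, K=200):
    # Sketch of the two-step iteration on the slide; uses grad_h from the
    # previous sketch. Not repository code.
    s = np.array(y, dtype=float)
    for _ in range(K):
        r = s - omega * (s - y)            # "Gradient descent step"
        s = r - gamma * grad_h(r, checks)  # "Code proximal step"
    return (s < 0).astype(int)             # undo x = (-1)^c: negative -> bit 1
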
@@ -87,7 +99,7 @@
\begin{frame}[t, fragile]
\frametitle{Proximal Decoding: Algorithm}
\begin{itemize}
\item Resulting iterative decoding algorithm \cite{proximal_paper}:
\item Iterative decoding algorithm \cite{proximal_paper}:
\end{itemize}
\vspace{2mm}
@@ -121,12 +133,12 @@ return $\boldsymbol{\hat{c}}$
\boldsymbol{c} : \lambda_{\boldsymbol{c}} \ge 0,
\sum_{\boldsymbol{c}\in\mathcal{C}}\lambda_{\boldsymbol{c}} = 1
\right\},
\hspace{5mm} \lambda_{\boldsymbol{c}} \in \mathbb{R}
\hspace{5mm} \lambda_{\boldsymbol{c}} \in \mathbb{R}_{\ge 0}
\end{align*}
\item Cost function:
\begin{align*}
\sum_{i=1}^{n} \gamma_i c_i,
\hspace{5mm}\gamma_i = \log\left(
\hspace{5mm}\gamma_i = \ln\left(
\frac{P\left( Y=y_i | C=0 \right) }{P\left( Y=y_i | C=1 \right) } \right)
\end{align*}
\item LP formulation of ML decoding:

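On the $\log \to \ln$ change above: with the BPSK-over-AWGN model used throughout, writing the channel law as a Gaussian density reduces the cost coefficients to scaled channel outputs. A standard identity, shown for context only:

\begin{align*}
\gamma_i
= \ln\left( \frac{f\left( y_i | c_i = 0 \right)}{f\left( y_i | c_i = 1 \right)} \right)
= \ln\left( \frac{e^{-\left( y_i - 1 \right)^2 / 2\sigma^2}}
{e^{-\left( y_i + 1 \right)^2 / 2\sigma^2}} \right)
= \frac{2 y_i}{\sigma^2}
\end{align*}
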

@@ -1,5 +1,5 @@
\section{Examination Results}%
\label{sec:Examination Results}
\section{Analysis}%
\label{sec:Analysis}
\subsection{Proximal Decoding}%
@@ -11,10 +11,10 @@
\frametitle{Proximal Decoding: Bit Error Rate and Performance}
\vspace*{-0.5cm}
\begin{itemize}
\item Comparison of simulation
\item Comparison of simulation%
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
with results of Wadayama et al.
with results of Wadayama et al. \cite{proximal_paper}
\end{itemize}
\begin{figure}[H]
@@ -78,9 +78,9 @@
\vspace*{-0.5cm}
\begin{itemize}
\item $\mathcal{O}\left(n \right) $ time complexity - same as BP;
only multiplication and addition necessary \cite{proximal_paper}
\item Measured Performance: $\sim\SI{10000}{}$ frames/s
- Intel Core i7-7700HQ @ 2.80GHz; $n=204$
only multiplication and addition necessary
\item Measured performance: $\sim\SI{10000}{}$ frames/s
on Intel Core i7-7700HQ @ 2.80GHz; $n=204$
\end{itemize}
\vspace{3mm}
\end{frame}
@@ -92,7 +92,7 @@
\setcounter{footnote}{0}
\begin{itemize}
\item Simulation
\item Simulation%
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
results for different values of $\gamma$
@@ -392,7 +392,7 @@
\setcounter{footnote}{0}
\begin{itemize}
\item Analysis of simulated
\item Analysis of simulated%
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
BER and FER
@@ -532,8 +532,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_1]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 2 \right] $}
\addlegendentry{$\nabla h \left[ 2 \right] $}
\addlegendentry{$\left(\nabla L \right)_2$}
\addlegendentry{$\left(\nabla h \right)_2 $}
\end{axis}
\end{tikzpicture}\\
\begin{tikzpicture}[scale = 0.35]
@@ -556,8 +556,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_2]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 3 \right] $}
\addlegendentry{$\nabla h \left[ 3 \right] $}
\addlegendentry{$\left(\nabla L \right)_3$}
\addlegendentry{$\left(\nabla h \right)_3 $}
\end{axis}
\end{tikzpicture}\\
\begin{tikzpicture}[scale = 0.35]
@@ -580,8 +580,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_3]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 4 \right] $}
\addlegendentry{$\nabla h \left[ 4 \right] $}
\addlegendentry{$\left(\nabla L \right)_4$}
\addlegendentry{$\left(\nabla h \right)_4 $}
\end{axis}
\end{tikzpicture}
\end{minipage}%
@@ -608,8 +608,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_0]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 1 \right] $}
\addlegendentry{$\nabla h \left[ 1 \right] $}
\addlegendentry{$\left(\nabla L \right)_1$}
\addlegendentry{$\left(\nabla h \right)_1 $}
\end{axis}
\end{tikzpicture}
\end{minipage}%
@@ -636,8 +636,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_4]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 5 \right] $}
\addlegendentry{$\nabla h \left[ 5 \right] $}
\addlegendentry{$\left(\nabla L \right)_5$}
\addlegendentry{$\left(\nabla h \right)_5 $}
\end{axis}
\end{tikzpicture}\\
\begin{tikzpicture}[scale = 0.35]
@@ -660,8 +660,8 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_5]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 6 \right] $}
\addlegendentry{$\nabla h \left[ 6 \right] $}
\addlegendentry{$\left(\nabla L \right)_6$}
\addlegendentry{$\left(\nabla h \right)_6 $}
\end{axis}
\end{tikzpicture}\\
\begin{tikzpicture}[scale = 0.35]
@@ -684,14 +684,14 @@ return $\boldsymbol{\hat{c}}$
table [col sep=comma, x=k, y=grad_h_6]
{res/proximal/comp_bch_7_4_combined.csv};
\addlegendentry{est}
\addlegendentry{$\nabla L \left[ 7 \right] $}
\addlegendentry{$\nabla h \left[ 7 \right] $}
\addlegendentry{$\left(\nabla L \right)_7$}
\addlegendentry{$\left(\nabla h \right)_7 $}
\end{axis}
\end{tikzpicture}
\end{minipage}
\caption{Internal variables of proximal decoder
as a function of the iteration ($n=7$)\footnotemark}
as a function of the number of iterations ($n=7$)\footnotemark}
\footnotetext{A single decoding is shown, using the BCH$\left( 7,4 \right) $ code;
$\gamma = 0.05, \omega = 0.05, E_b / N_0 = \SI{5}{dB}$}
@@ -1018,7 +1018,7 @@ $\textcolor{KITblue}{\text{Compute }d_H\left( \boldsymbol{ \tilde{c}}_n, \boldsy
$\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d_H\left( \boldsymbol{ \tilde{c}}_n, \boldsymbol{\hat{c}} \right)}$
\end{algorithm}
\caption{Hybrid proximal \& ML decoding algorithm}
\caption{Improved proximal decoding algorithm}
\end{figure}
\end{minipage}
\end{frame}
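
The two visible algorithm lines pin down the selection rule of the improved decoder. A minimal sketch of just that step; candidate generation (the list of N-bit corrections) is not shown in this diff, so `candidates` is assumed to be given:

import numpy as np

def ml_on_list(candidates, c_hat):
    # Among the candidate codewords, output the one with the lowest Hamming
    # distance d_H to the proximal estimate c_hat (the rule shown above).
    return min(candidates, key=lambda c: int(np.count_nonzero(c != c_hat)))
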
@@ -1029,8 +1029,8 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\frametitle{Proximal Decoding: Improvement using ``ML-on-List''}
\begin{itemize}
\item Comparison of proximal \& hybrid proximal-ML (correction of $N = \SI{12}{\bit}$)
decoding simulation
\item Comparison of proximal \& improved (correction of $N = \SI{12}{\bit}$)
decoding simulation%
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[Code: 204.33.484]{mackay_enc}}
results
@@ -1150,13 +1150,13 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\addlegendentry{proximal, $\gamma = 0.05$}
\addlegendimage{Emerald, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.15$}
\addlegendentry{improved, $\gamma = 0.15$}
\addlegendimage{RoyalPurple, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.01$}
\addlegendentry{improved, $\gamma = 0.01$}
\addlegendimage{red, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.05$}
\addlegendentry{improved, $\gamma = 0.05$}
\end{axis}
\end{tikzpicture}
@@ -1433,19 +1433,19 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\addlegendentry{proximal, $\gamma = 0.15$}
\addlegendimage{Emerald, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.15$}
\addlegendentry{improved, $\gamma = 0.15$}
\addlegendimage{NavyBlue, mark=*, solid}
\addlegendentry{proximal, $\gamma = 0.01$}
\addlegendimage{RoyalPurple, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.01$}
\addlegendentry{improved, $\gamma = 0.01$}
\addlegendimage{RedOrange, mark=*, solid}
\addlegendentry{proximal, $\gamma = 0.05$}
\addlegendimage{red, mark=triangle, densely dashed}
\addlegendentry{hybrid, $\gamma = 0.05$}
\addlegendentry{improved, $\gamma = 0.05$}
\end{axis}
\end{tikzpicture}
@@ -1466,7 +1466,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\begin{axis}[
grid=both,
xlabel={Iterations},
ylabel={Average $\left| \boldsymbol{x}-\boldsymbol{\hat{x}} \right|$},
ylabel={Average $\lVert \boldsymbol{c}-\boldsymbol{\hat{c}} \rVert$},
legend pos=outer north east,
]
\addplot [ForestGreen, mark=none, line width=1pt]
@@ -1488,7 +1488,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\end{axis}
\end{tikzpicture}
\caption{Average error for $\SI{500000}{}$ decodings,$
\caption{Average error for $\SI{500000}{}$ decodings, $
\omega = 0.05, \gamma = 0.05, K=200$\footnotemark}
\end{figure}
@@ -1496,7 +1496,8 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\cite[Code: 204.33.484]{mackay_enc}}
\begin{itemize}
\item For large $k$, the average error asymptotically approaches a minimum, non-zero value
\item With increasing iterations, the average error asymptotically
approaches a minimum, non-zero value
\end{itemize}
\end{frame}
@@ -1513,6 +1514,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
grid=both,
xlabel={$n$}, ylabel={time per frame (s)},
legend style={at={(0.05,0.77)},anchor=south west},
legend cell align={left},
]
\addplot[RedOrange, only marks, mark=*]
table [col sep=comma, x=n, y=spf]
@@ -1522,7 +1524,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\addplot[RoyalPurple, only marks, mark=triangle*]
table [col sep=comma, x=n, y=spf]
{res/hybrid/fps_vs_n.csv};
\addlegendentry{hybrid prox \& ML ($\SI{12}{\bit}$)}
\addlegendentry{improved ($\SI{12}{\bit}$)}
\end{axis}
\end{tikzpicture}
@@ -1551,13 +1553,12 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\item Error-correction performance (BER, FER, decoding failures)
\item Computational performance ($\mathcal{O}\left( n \right) $ time complexity,
fast implementation possible)
\item Number of iterations required independant of SNR
\item Operation during iteration (oscillation of estimate)
\item Number of iterations independent of SNR
\end{itemize}
\item Suggestion for improvement of proximal decoding:
\begin{itemize}
\item Addidion of ``ML-on-list'' step
\item $\sim\SI{1}{dB}$ gain under certain conditions
\item Addition of ``ML-on-list'' step
\item Up to $\sim\SI{1}{dB}$ gain under certain conditions
\end{itemize}
\end{itemize}
\end{frame}


@@ -1,4 +1,4 @@
\section{Forthcoming Examinations}%
\section{Forthcoming Analysis}%
\label{sec:Forthcoming Examinations}
@@ -10,7 +10,7 @@
\frametitle{Forthcoming Analysis}
\begin{itemize}
\item Test the (Alternating Direction Method of Multipliers) ADMM
\item Test ADMM (Alternating Direction Method of Multipliers)
as an optimization method for LP Decoding
\begin{itemize}
\item In LP decoding, the ML decoding problem is reduced to a linear program,
@@ -23,8 +23,8 @@
\end{itemize}
\item Compare the ADMM implementation with the proximal decoding implementation with respect to
\begin{itemize}
\item Decoding performance (BER, FER)
\item Computational performance (time complexity, actual seconds per frame)
\item decoding performance (BER, FER)
\item computational performance (time complexity, actual seconds per frame)
\end{itemize}
\end{itemize}
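
For reference, the generic scaled-form ADMM iteration this plan would specialize, written for $\min f(\boldsymbol{x}) + g(\boldsymbol{z})$ subject to $\boldsymbol{A}\boldsymbol{x} + \boldsymbol{B}\boldsymbol{z} = \boldsymbol{c}$; choosing $f$ and $g$ for LP decoding is precisely the forthcoming work, so this is background rather than the planned implementation:

\begin{align*}
\boldsymbol{x}^{k+1} &= \argmin_{\boldsymbol{x}}\left( f\left( \boldsymbol{x} \right)
+ \frac{\rho}{2}\left\lVert \boldsymbol{A}\boldsymbol{x}
+ \boldsymbol{B}\boldsymbol{z}^{k} - \boldsymbol{c}
+ \boldsymbol{u}^{k} \right\rVert_2^2 \right)\\
\boldsymbol{z}^{k+1} &= \argmin_{\boldsymbol{z}}\left( g\left( \boldsymbol{z} \right)
+ \frac{\rho}{2}\left\lVert \boldsymbol{A}\boldsymbol{x}^{k+1}
+ \boldsymbol{B}\boldsymbol{z} - \boldsymbol{c}
+ \boldsymbol{u}^{k} \right\rVert_2^2 \right)\\
\boldsymbol{u}^{k+1} &= \boldsymbol{u}^{k} + \boldsymbol{A}\boldsymbol{x}^{k+1}
+ \boldsymbol{B}\boldsymbol{z}^{k+1} - \boldsymbol{c}
\end{align*}
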


@@ -23,7 +23,7 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]
\frametitle{Previous work}
\frametitle{Previous Work}
\begin{figure}[h]
\centering
@@ -47,8 +47,8 @@
\end{figure}
\begin{itemize}
\item Examination of ``Proximal Decoding''
\item Examination of ``Interior Point Decoding''
\item Analysis of ``Proximal Decoding''
\item Analysis of ``Interior Point Decoding''
\end{itemize}
\end{frame}
@@ -89,12 +89,13 @@
\end{figure}
\begin{itemize}
\item All simulations are performed with BPSK modulation:
\item All simulations are performed with BPSK:
\begin{align*}
\boldsymbol{x} = \left( -1 \right)^{\boldsymbol{c}},
\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n
\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n,
\hspace{2mm} \boldsymbol{x} \in \mathbb{R}^n
\end{align*}
\item The used channel model is AWGN:
\item The channel model is AWGN:
\begin{align*}
\boldsymbol{y} = \boldsymbol{x} + \boldsymbol{z},
\hspace{5mm}\boldsymbol{z}\sim \mathcal{N}\left( \boldsymbol{0}, \sigma^2 \boldsymbol{I} \right)
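
The channel model in this final hunk translates directly into simulation code. A minimal sketch; all names are hypothetical and the noise standard deviation is left as a parameter:

import numpy as np

def awgn_bpsk_channel(c, sigma, rng=None):
    # BPSK mapping x = (-1)^c followed by AWGN: y = x + z, z ~ N(0, sigma^2 I).
    if rng is None:
        rng = np.random.default_rng()
    x = (-1.0) ** c                        # c in {0,1}^n  ->  x in {+1,-1}^n
    z = rng.normal(0.0, sigma, size=np.shape(c))
    return x + z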