First round of corrections

Andreas Tsouchlos 2023-01-25 22:01:01 +01:00
parent c18d0f8c12
commit f433607ce6
5 changed files with 34 additions and 35 deletions

View File

@ -121,7 +121,7 @@
\title{Application of Optimization Algorithms for Channel Decoding}
\subtitle{\small Midterm Presentation}
\subtitle{\small Midterm Presentation - 27.01.2023}
%\author{Andreas Tsouchlos}
\author{\vspace{1.5mm} Andreas Tsouchlos}

View File

@ -116,7 +116,7 @@ Output $\boldsymbol{\hat{x}}$
\begin{minipage}[c]{0.6\linewidth}
\begin{itemize}
\item Codeword Polytope:
\item Codeword polytope:
\begin{align*}
\text{poly}\left( \mathcal{C} \right) =
\left\{
@ -126,13 +126,13 @@ Output $\boldsymbol{\hat{x}}$
\right\},
\hspace{5mm} \lambda_{\boldsymbol{c}} \in \mathbb{R}
\end{align*}
\item Cost Function:
\item Cost function:
\begin{align*}
\sum_{i=1}^{n} \gamma_i c_i,
\hspace{5mm}\gamma_i = \log\left(
\frac{P\left( Y=y_i | C=0 \right) }{P\left( Y=y_i | C=1 \right) } \right)
\end{align*}
\item LP Formulation of ML Decoding:
\item LP formulation of ML decoding:
\begin{align*}
&\text{minimize } \sum_{i=1}^{n} \gamma_i f_i \\
&\text{subject to } \boldsymbol{f}\in\text{poly}\left( \mathcal{C} \right)

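A minimal numerical sketch of the LP above (not part of the slides): for a code small enough to enumerate, poly(C) can be taken as the convex hull of all codewords and the LP solved directly in the barycentric coordinates lambda_c with a generic solver. The (7,4) Hamming code, the noise level, and all names below are illustrative assumptions; practical LP decoders replace poly(C) by a relaxed polytope with polynomially many constraints, since enumerating 2^k codewords does not scale.

```python
# Sketch: LP formulation of ML decoding over the codeword polytope, solved in
# the barycentric coordinates lambda_c (only feasible for tiny codes, since all
# 2^k codewords are enumerated). Code choice and noise level are illustrative.
import itertools
import numpy as np
from scipy.optimize import linprog

# One systematic generator matrix of the (7,4) Hamming code.
G = np.array([[1, 0, 0, 0, 1, 1, 0],
              [0, 1, 0, 0, 1, 0, 1],
              [0, 0, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1]], dtype=int)
k, n = G.shape

# All codewords c = uG (mod 2); these are the vertices of poly(C).
codewords = np.array([(u @ G) % 2 for u in itertools.product([0, 1], repeat=k)])

# Simulated channel: all-zeros codeword, BPSK, AWGN; for this model the LLRs are
# gamma_i = log P(y_i|C=0)/P(y_i|C=1) = 2 y_i / sigma^2.
rng = np.random.default_rng(0)
sigma2 = 0.5                                   # example noise variance
y = (-1.0) ** np.zeros(n) + rng.normal(0.0, np.sqrt(sigma2), n)
gamma = 2.0 * y / sigma2

# minimize sum_i gamma_i f_i  s.t.  f = sum_c lambda_c c, lambda_c >= 0, sum lambda_c = 1.
# In lambda-coordinates the objective becomes (C gamma)^T lambda.
res = linprog(c=codewords @ gamma,
              A_eq=np.ones((1, len(codewords))), b_eq=[1.0],
              bounds=[(0.0, None)] * len(codewords),
              method="highs")
f_hat = codewords.T @ res.x                    # minimizer inside poly(C)
print(np.round(f_hat).astype(int))             # a vertex, i.e. the ML codeword (ties aside)
```

Since a linear objective over the lambda-simplex is minimized at a unit vector, f_hat coincides with a codeword here; with the relaxed polytope used in practice, fractional optima can occur and indicate a decoding failure.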
View File

@ -67,8 +67,8 @@
\end{figure}
\item $\mathcal{O}\left(n \right) $ time complexity - same as BP;
Only multiplication and addition necessary \cite{proximal_paper}
\item Measured Performance: $\sim\SI{10000}{frames / \second}$
only multiplication and addition necessary \cite{proximal_paper}
\item Measured performance: $\sim\num{10000}$ frames/s
- Intel Core i7-7700HQ @ 2.80GHz; $n=204$
\end{itemize}
\vspace{3mm}
@ -81,8 +81,8 @@
\setcounter{footnote}{0}
\begin{itemize}
\item Comparison of simulation
\footnote{(3,6) regular LDPC Code with $n=204, k=102$
\item Analysis of simulation
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
results for different values of $\gamma$
\end{itemize}
@ -367,8 +367,8 @@
\setcounter{footnote}{0}
\begin{itemize}
\item Comparison of simulated
\footnote{(3,6) regular LDPC Code with $n=204, k=102$
\item Analysis of simulated
\footnote{(3,6) regular LDPC code with $n=204, k=102$
\cite[\text{204.33.484}]{mackay_enc}}
BER and FER
\end{itemize}
@ -660,8 +660,8 @@ Output $\boldsymbol{\hat{x}}$
\setcounter{footnote}{0}
\begin{itemize}
\item For larger $n$, the Gradient itself starts to oscillate
\item The Amplitude of the oscillation seems to be highly correlated
\item For larger $n$, the gradient itself starts to oscillate
\item The amplitude of the oscillation seems to be highly correlated
with the probability of a bit error
\end{itemize}
@ -721,12 +721,12 @@ Output $\boldsymbol{\hat{x}}$
\end{axis}
\end{tikzpicture}
\caption{Corellation between bit error and amplitude of oscillation}
\caption{Correlation between bit error and amplitude of oscillation}
\end{subfigure}
\end{figure}
\footnotetext{A single decoding is shown, using a (3,6) regular LDPC Code
\footnotetext{A single decoding is shown, using a (3,6) regular LDPC code
with $n=204, k=102$ \cite[\text{204.33.484}]{mackay_enc};
$\gamma = 0.05, \omega = 0.05, E_b / N_0 = \SI{5}{dB}$}
\end{frame}
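One way to make the claimed correlation concrete (a sketch under assumptions, not the slides' evaluation code): record the gradient trajectory of a single decoding, take each bit's peak-to-peak gradient swing over the late iterations as its oscillation amplitude, and compare the amplitudes of correctly and incorrectly decoded bits. The trajectory and error flags below are random placeholders so the snippet runs stand-alone; in practice they would come from an actual decoder run.

```python
# Illustrative sketch: quantify how strongly per-bit gradient oscillation
# relates to bit errors. `grad_traj` stands in for the gradient history of one
# decoder run (iterations x n); here both inputs are random placeholders.
import numpy as np

rng = np.random.default_rng(1)
num_iters, n = 200, 204
grad_traj = rng.normal(size=(num_iters, n))            # placeholder gradient history
bit_errors = rng.integers(0, 2, size=n).astype(bool)   # placeholder error indicators

# Oscillation amplitude per bit: peak-to-peak swing over the late iterations,
# after initial transients have died out.
tail = grad_traj[num_iters // 2:]
amplitude = tail.max(axis=0) - tail.min(axis=0)

# Point-biserial correlation between amplitude and the bit-error indicator.
corr = np.corrcoef(amplitude, bit_errors.astype(float))[0, 1]
print(f"mean amplitude (bit errors):   {amplitude[bit_errors].mean():.3f}")
print(f"mean amplitude (correct bits): {amplitude[~bit_errors].mean():.3f}")
print(f"correlation coefficient:       {corr:.3f}")
```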
@ -1364,11 +1364,11 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{x}}_n\text{ with lowest }d
\end{axis}
\end{tikzpicture}
\caption{Average error for $\SI{500000}{decodings},
\caption{Average error for $\num{500000}$ decodings, $
\omega = 0.05, \gamma = 0.05, K=200$\footnotemark}
\end{figure}
\footnotetext{Simulation performed with (3,6) regular LDPC Code with $n=204, k=102$
\footnotetext{Simulation performed with (3,6) regular LDPC code with $n=204, k=102$
\cite[Code: 204.33.484]{mackay_enc}}
\begin{itemize}

View File

@ -1,5 +1,5 @@
\section{Forthcoming Examination}%
\label{sec:Forthcoming Examination}
\section{Forthcoming Examinations}%
\label{sec:Forthcoming Examinations}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@ -7,7 +7,7 @@
\label{sub:LP Decoding}
\begin{frame}[t]
\frametitle{Forthcoming Examination: LP Decoding}
\frametitle{Forthcoming Examinations: LP Decoding}
\begin{itemize}
\item Test the Alternating Direction Method of Multipliers (ADMM)

View File

@ -11,8 +11,8 @@
\begin{itemize}
\item The general [ML] decoding problem for linear codes and the general problem
of finding the weights of a linear code are both NP-complete. \cite{ml_np_hard_proof}
\item The iterative messagepassing algorithms preffered in practice do not guarantee
optimality and may fail to decode correctly when the graph contains cycles
\item The iterative message-passing algorithms preferred in practice do not guarantee
optimality and may fail to decode correctly when the graph contains cycles.
\cite{ldpc_conv}
\item The standard message-passing algorithms used for decoding [LDPC and turbo codes]
are often difficult to analyze. \cite{feldman_thesis}
@ -48,7 +48,7 @@
\begin{itemize}
\item Examination of ``Proximal Decoding''
\item Examination of ``Iterative Point Decoding''
\item Examination of ``Interior Point Decoding''
\end{itemize}
\end{frame}
@ -66,14 +66,14 @@
\centering
\begin{tikzpicture}[scale=1, transform shape]
\node (in) {$c\left[ k \right] $};
\node (in) {$\boldsymbol{c}$};
\node[mapper, right=0.5cm of in] (bpskmap) {Mapper};
\node[right=1.5cm of bpskmap,
draw, circle, inner sep=0pt, minimum size=0.5cm] (add) {$+$};
\node[right=0.5cm of add] (out) {$y\left[ k \right] $};
\node[below=0.5cm of add] (noise) {$n\left[ k \right] $};
\node[right=0.5cm of add] (out) {$\boldsymbol{y}$};
\node[below=0.5cm of add] (noise) {$\boldsymbol{z}$};
\node at ($(bpskmap.east)!0.5!(add.west) + (0,0.3cm)$) {$x\left[ k \right] $};
\node at ($(bpskmap.east)!0.5!(add.west) + (0,0.3cm)$) {$\boldsymbol{x}$};
\draw[->] (in) -- (bpskmap);
\draw[->] (bpskmap) -- (add);
@ -83,22 +83,21 @@
\end{figure}
\begin{itemize}
\item All simulations are performed with BPSK Modulation:
\item All simulations are performed with BPSK modulation:
\begin{align*}
x\left[ k \right] = \left( -1 \right)^{c\left[ k \right] },
\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n,
\hspace{2mm} k\in \left\{ 1, \ldots, n \right\}
\boldsymbol{x} = \left( -1 \right)^{\boldsymbol{c}},
\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n
\end{align*}
\item The used channel model is AWGN:
\begin{align*}
\boldsymbol{y} = \boldsymbol{x} + \boldsymbol{n},
\hspace{5mm}\boldsymbol{n}\sim \mathcal{N}
\boldsymbol{y} = \boldsymbol{x} + \boldsymbol{z},
\hspace{5mm}\boldsymbol{z}\sim \mathcal{N}
\left(0,\frac{1}{2}\left(\frac{k}{n}\frac{E_b}{N_0}\right)^{-1}\right),
\hspace{2mm} \boldsymbol{y}, \boldsymbol{n} \in \mathbb{R}^n
\hspace{2mm} \boldsymbol{y}, \boldsymbol{z} \in \mathbb{R}^n
\end{align*}
\item All-zeros assumption:
\begin{align*}
\boldsymbol{c} = 0
\boldsymbol{c} = \boldsymbol{0}
\end{align*}
\end{itemize}
\end{frame}
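The system model above translates directly into a few lines of simulation code. A minimal sketch under the slides' assumptions (all-zeros codeword, BPSK mapping, AWGN with the stated variance); the Eb/N0 value is an example choice, and the closing LLR line uses the standard BPSK/AWGN expression 2 y_i / sigma^2, which is not spelled out on this slide.

```python
# Minimal sketch of the channel model: all-zeros codeword, BPSK mapping
# x = (-1)^c, AWGN with variance 1/2 * ((k/n) * Eb/N0)^(-1).
import numpy as np

rng = np.random.default_rng(0)
n, k = 204, 102                          # (3,6) regular LDPC code dimensions
ebn0_db = 3.0                            # example Eb/N0 in dB
ebn0 = 10.0 ** (ebn0_db / 10.0)

c = np.zeros(n, dtype=int)               # all-zeros assumption
x = (-1.0) ** c                          # BPSK: c=0 -> +1, c=1 -> -1
sigma2 = 0.5 / ((k / n) * ebn0)          # noise variance from the slide
z = rng.normal(0.0, np.sqrt(sigma2), n)
y = x + z                                # received vector

# LLRs gamma_i = log P(y_i|C=0)/P(y_i|C=1) = 2 y_i / sigma^2 for this model;
# these are the cost-function coefficients used by the LP formulation.
gamma = 2.0 * y / sigma2
```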
@ -113,7 +112,7 @@
\begin{minipage}[c]{0.6\linewidth}
\begin{itemize}
\item Reormulate decoding problem as optimization problem
\item Reformulate decoding problem as optimization problem
\begin{itemize}
\item Establish objective function
\item Establish constraints