Second round of changes I
parent 331378fc90
commit 0455d16fce
@@ -126,7 +126,7 @@


 \title{Application of Optimization Algorithms for Channel Decoding}
-\subtitle{\small Midterm Presentation - 27.01.2023}
+\subtitle{\small Midterm Presentation, 27.01.2023}
 %\author{Andreas Tsouchlos}
 \author{\vspace{1.5mm} Andreas Tsouchlos}

@@ -13,10 +13,10 @@
 \item MAP rule:
 \begin{align*}
 \hat{\boldsymbol{x}}
-= \argmax_{x\in\mathbb{R}}
+= \argmax_{x\in\mathbb{R}^n}
 f_{\boldsymbol{Y}}\left( \boldsymbol{y} | \boldsymbol{x} \right)
 f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
-= \argmax_{x\in\mathbb{R}}
+= \argmax_{x\in\mathbb{R^n}}
 e^{-L\left( \boldsymbol{y} | \boldsymbol{x}\right)}
 f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
 \end{align*}
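For orientation while reading the diff: the MAP rule above and the approximate prior in the next hunk combine into the unconstrained objective that proximal decoding works on. The following worked step is an illustrative sketch (not part of the commit) using only the symbols already on the slides:

% Illustrative sketch: substituting the approximate prior
% f_X(x) ~ (1/Z) exp(-gamma h(x)) into the MAP rule and taking the
% negative logarithm turns MAP decoding into an unconstrained minimization.
\begin{align*}
\hat{\boldsymbol{x}}
= \argmax_{x\in\mathbb{R}^n}
e^{-L\left( \boldsymbol{y} | \boldsymbol{x}\right)}
\frac{1}{Z} e^{-\gamma h\left( \boldsymbol{x} \right)}
= \argmin_{x\in\mathbb{R}^n}
\left[ L\left( \boldsymbol{y} | \boldsymbol{x}\right)
+ \gamma h\left( \boldsymbol{x} \right) \right]
\end{align*}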
@@ -24,23 +24,34 @@
 \begin{align*}
 f_{\boldsymbol{X}}\left( \boldsymbol{x} \right)
 = \frac{1}{\left| \mathcal{C}\left( \boldsymbol{H} \right) \right| }
-\sum_{c \in \mathcal{C}\left( \boldsymbol{H} \right) }
+\sum_{\boldsymbol{c} \in \mathcal{C}\left( \boldsymbol{H} \right) }
 \delta\left( \boldsymbol{x} - \left( -1 \right)^{\boldsymbol{c}} \right)
-\approx \frac{1}{Z} e^{-\gamma h\left( x \right) }
+\approx \frac{1}{Z} e^{-\gamma h\left( \boldsymbol{x} \right) }
 \end{align*}
 \item Code constraint polynomial:
+\begin{minipage}[c]{0.56\textwidth}
+\raggedright

 \begin{align*}
 h\left( \boldsymbol{x} \right) =
-\underbrace{\sum_{j=1}^{n} \left( x_j^2 - 1 \right)^2}_{\text{Bipolar constraint}}
+\underbrace{\sum_{j=1}^{n} \left( x_j^2 - 1 \right)^2}_{\text{Bipolar
+constraint}}
 + \underbrace{\sum_{i=1}^{m} \left[ \left(
 \prod_{j\in\mathcal{A}\left( i \right)} x_j\right) -1 \right]^2}
 _{\text{Parity constraint}},
-\hspace{5mm}\mathcal{A}\left( i \right) \equiv \left\{
-j | j\in \mathcal{J},
-\boldsymbol{H}_{j,i} = 1
-\right\},
-i \in \mathcal{I}
 \end{align*}
+\end{minipage}%
+\begin{minipage}[c]{0.4\textwidth}
+\raggedleft
+\begin{flalign*}
+\mathcal{I} &\equiv \left\{\text{``Set of all variable nodes''}\right\} &\\
+\mathcal{J} &\equiv \left\{\text{``Set of all check nodes''}\right\} &\\
+\mathcal{A}\left( i \right) &\equiv \left\{j | j\in \mathcal{J},
+\boldsymbol{H}_{j,i} = 1
+\right\}, i \in \mathcal{I}&\\
+\end{flalign*}
+\end{minipage}
+\hfill
 \end{itemize}
 \end{frame}

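The polynomial h above is what the decoder's gradient steps operate on, so a concrete reference may help. This is an illustrative Python sketch (not part of the commit) of h and its gradient for a 0/1 parity-check matrix H; the indexing convention (row i of H lists the variables of check i) and the helper names h and grad_h are assumptions of the sketch, not fixed by the diff.

# Illustrative sketch (not part of the commit): code-constraint polynomial h(x)
# and its gradient for a binary parity-check matrix H (NumPy 0/1 array).
import numpy as np

def h(x, H):
    """Bipolar constraint plus parity constraint, as on the slide."""
    bipolar = np.sum((x**2 - 1.0)**2)
    parity = 0.0
    for row in H:                                      # one term per check node
        idx = np.flatnonzero(row)                      # variables in this check (assumed convention)
        parity += (np.prod(x[idx]) - 1.0)**2
    return bipolar + parity

def grad_h(x, H):
    """Gradient of h, used by the code proximal step."""
    g = 4.0 * x * (x**2 - 1.0)                         # derivative of the bipolar term
    for row in H:
        idx = np.flatnonzero(row)
        p = np.prod(x[idx])
        for pos, k in enumerate(idx):
            others = np.prod(np.delete(x[idx], pos))   # product excluding x_k
            g[k] += 2.0 * (p - 1.0) * others           # derivative of the parity term
    return g

For the BCH(7,4) example used in the later figures, H would be the corresponding 3x7 parity-check matrix.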
@@ -63,8 +74,8 @@
 \item Code proximal operator \cite{proximal_algorithms}:
 \begin{align*}
 \text{prox}_{\gamma h} \left( \boldsymbol{x} \right) &\equiv
-\argmin_{\boldsymbol{z}\in\mathbb{R}} \left(
-\gamma h\left( \boldsymbol{z} \right) + \frac{1}{2} \lVert \boldsymbol{z}
+\argmin_{\boldsymbol{t}\in\mathbb{R}^n} \left(
+\gamma h\left( \boldsymbol{t} \right) + \frac{1}{2} \lVert \boldsymbol{t}
 - \boldsymbol{x} \rVert^2 \right)\\
 &\approx \boldsymbol{x} - \gamma \nabla h\left( \boldsymbol{x} \right),
 \hspace{5mm} \gamma \text{ small}
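The approximation in the last two lines of this hunk follows from the first-order optimality condition of the minimization; an illustrative sketch of that step (not part of the commit), using only the slide's symbols:

% Illustrative sketch: setting the gradient of the prox objective to zero gives
% gamma * grad h(t) + t - x = 0, i.e. t = x - gamma * grad h(t); for small gamma
% the fixed point is approximated by a single explicit gradient step.
\begin{align*}
\text{prox}_{\gamma h}\left( \boldsymbol{x} \right)
= \boldsymbol{x} - \gamma \nabla h\left( \text{prox}_{\gamma h}\left( \boldsymbol{x} \right) \right)
\approx \boldsymbol{x} - \gamma \nabla h\left( \boldsymbol{x} \right)
\end{align*}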
@@ -77,7 +88,8 @@
 \hspace{10mm} \text{``Gradient descent step''}\\
 \boldsymbol{s} &\leftarrow \boldsymbol{r}
 - \gamma \nabla h\left( \boldsymbol{r}
-\right) \hspace{29mm} \text{``Code proximal step''}
+\right), \hspace{9mm} \gamma > 0
+\hspace{10mm} \text{``Code proximal step''}
 \end{align*}
 \end{itemize}
 \end{frame}
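One possible realization of the two steps named in this hunk, as an illustrative Python sketch (not part of the commit). The code proximal step follows the slide (s <- r - gamma * grad_h(r)); the form of the gradient descent step is an assumption here (an AWGN negative log-likelihood gradient with the step size omega that appears in the later simulation footnotes), and grad_h is the helper sketched after the code-constraint hunk above. The exact steps in the authors' implementation may differ.

# Illustrative sketch (not part of the commit): proximal decoding iteration.
import numpy as np

def proximal_decode(y, H, gamma=0.05, omega=0.05, K=200):
    s = np.copy(y)                          # start from the channel output
    for _ in range(K):
        r = s - omega * (s - y)             # "Gradient descent step" (assumed AWGN form)
        s = r - gamma * grad_h(r, H)        # "Code proximal step" (as on the slide)
    return (s < 0).astype(int)              # back to bits via x = (-1)^c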
@@ -87,7 +99,7 @@
 \begin{frame}[t, fragile]
 \frametitle{Proximal Decoding: Algorithm}
 \begin{itemize}
-\item Resulting iterative decoding algorithm \cite{proximal_paper}:
+\item Iterative decoding algorithm \cite{proximal_paper}:
 \end{itemize}

 \vspace{2mm}
@@ -121,12 +133,12 @@ return $\boldsymbol{\hat{c}}$
 \boldsymbol{c} : \lambda_{\boldsymbol{c}} \ge 0,
 \sum_{\boldsymbol{c}\in\mathcal{C}}\lambda_{\boldsymbol{c}} = 1
 \right\},
-\hspace{5mm} \lambda_{\boldsymbol{c}} \in \mathbb{R}
+\hspace{5mm} \lambda_{\boldsymbol{c}} \in \mathbb{R}_{\ge 0}
 \end{align*}
 \item Cost function:
 \begin{align*}
 \sum_{i=1}^{n} \gamma_i c_i,
-\hspace{5mm}\gamma_i = \log\left(
+\hspace{5mm}\gamma_i = \ln\left(
 \frac{P\left( Y=y_i | C=0 \right) }{P\left( Y=y_i | C=1 \right) } \right)
 \end{align*}
 \item LP formulation of ML decoding:
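Since all simulations in the deck use BPSK over AWGN, the LLR cost coefficients above have a simple closed form. This is an illustrative sketch (not part of the commit); the noise variance sigma^2 is a symbol introduced here, not one used elsewhere in the diff:

% Illustrative sketch: with the BPSK mapping x = (-1)^c and AWGN of variance
% sigma^2, the cost coefficients evaluate to
\begin{align*}
\gamma_i = \ln\left( \frac{P\left( Y=y_i | C=0 \right) }{P\left( Y=y_i | C=1 \right) } \right)
= \frac{\left( y_i + 1 \right)^2 - \left( y_i - 1 \right)^2}{2\sigma^2}
= \frac{2 y_i}{\sigma^2}
\end{align*}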
@@ -1,5 +1,5 @@
-\section{Examination Results}%
-\label{sec:Examination Results}
+\section{Analysis}%
+\label{sec:Analysis}


 \subsection{Proximal Decoding}%
@@ -11,10 +11,10 @@
 \frametitle{Proximal Decoding: Bit Error Rate and Performance}
 \vspace*{-0.5cm}
 \begin{itemize}
-\item Comparison of simulation
+\item Comparison of simulation%
 \footnote{(3,6) regular LDPC code with $n=204, k=102$
 \cite[\text{204.33.484}]{mackay_enc}}
-with results of Wadayama et al.
+with results of Wadayama et al. \cite{proximal_paper}
 \end{itemize}

 \begin{figure}[H]
@@ -78,9 +78,9 @@
 \vspace*{-0.5cm}
 \begin{itemize}
 \item $\mathcal{O}\left(n \right) $ time complexity - same as BP;
-only multiplication and addition necessary \cite{proximal_paper}
-\item Measured Performance: $\sim\SI{10000}{}$ frames/s
-- Intel Core i7-7700HQ @ 2.80GHz; $n=204$
+only multiplication and addition necessary
+\item Measured performance: $\sim\SI{10000}{}$ frames/s
+on Intel Core i7-7700HQ @ 2.80GHz; $n=204$
 \end{itemize}
 \vspace{3mm}
 \end{frame}
@@ -92,7 +92,7 @@
 \setcounter{footnote}{0}

 \begin{itemize}
-\item Simulation
+\item Simulation%
 \footnote{(3,6) regular LDPC code with $n=204, k=102$
 \cite[\text{204.33.484}]{mackay_enc}}
 results for different values of $\gamma$
@@ -392,7 +392,7 @@
 \setcounter{footnote}{0}

 \begin{itemize}
-\item Analysis of simulated
+\item Analysis of simulated%
 \footnote{(3,6) regular LDPC code with $n=204, k=102$
 \cite[\text{204.33.484}]{mackay_enc}}
 BER and FER
@@ -532,8 +532,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_1]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 2 \right] $}
-\addlegendentry{$\nabla h \left[ 2 \right] $}
+\addlegendentry{$\left(\nabla L \right)_2$}
+\addlegendentry{$\left(\nabla h \right)_2 $}
 \end{axis}
 \end{tikzpicture}\\
 \begin{tikzpicture}[scale = 0.35]
@@ -556,8 +556,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_2]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 3 \right] $}
-\addlegendentry{$\nabla h \left[ 3 \right] $}
+\addlegendentry{$\left(\nabla L \right)_3$}
+\addlegendentry{$\left(\nabla h \right)_3 $}
 \end{axis}
 \end{tikzpicture}\\
 \begin{tikzpicture}[scale = 0.35]
@@ -580,8 +580,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_3]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 4 \right] $}
-\addlegendentry{$\nabla h \left[ 4 \right] $}
+\addlegendentry{$\left(\nabla L \right)_4$}
+\addlegendentry{$\left(\nabla h \right)_4 $}
 \end{axis}
 \end{tikzpicture}
 \end{minipage}%
@@ -608,8 +608,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_0]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 1 \right] $}
-\addlegendentry{$\nabla h \left[ 1 \right] $}
+\addlegendentry{$\left(\nabla L \right)_1$}
+\addlegendentry{$\left(\nabla h \right)_1 $}
 \end{axis}
 \end{tikzpicture}
 \end{minipage}%
@@ -636,8 +636,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_4]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 5 \right] $}
-\addlegendentry{$\nabla h \left[ 5 \right] $}
+\addlegendentry{$\left(\nabla L \right)_5$}
+\addlegendentry{$\left(\nabla h \right)_5 $}
 \end{axis}
 \end{tikzpicture}\\
 \begin{tikzpicture}[scale = 0.35]
@@ -660,8 +660,8 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_5]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 6 \right] $}
-\addlegendentry{$\nabla h \left[ 6 \right] $}
+\addlegendentry{$\left(\nabla L \right)_6$}
+\addlegendentry{$\left(\nabla h \right)_6 $}
 \end{axis}
 \end{tikzpicture}\\
 \begin{tikzpicture}[scale = 0.35]
@@ -684,14 +684,14 @@ return $\boldsymbol{\hat{c}}$
 table [col sep=comma, x=k, y=grad_h_6]
 {res/proximal/comp_bch_7_4_combined.csv};
 \addlegendentry{est}
-\addlegendentry{$\nabla L \left[ 7 \right] $}
-\addlegendentry{$\nabla h \left[ 7 \right] $}
+\addlegendentry{$\left(\nabla L \right)_7$}
+\addlegendentry{$\left(\nabla h \right)_7 $}
 \end{axis}
 \end{tikzpicture}
 \end{minipage}

 \caption{Internal variables of proximal decoder
-as a function of the iteration ($n=7$)\footnotemark}
+as a function of the number of iterations ($n=7$)\footnotemark}

 \footnotetext{A single decoding is shown, using the BCH$\left( 7,4 \right) $ code;
 $\gamma = 0.05, \omega = 0.05, E_b / N_0 = \SI{5}{dB}$}
@@ -1018,7 +1018,7 @@ $\textcolor{KITblue}{\text{Compute }d_H\left( \boldsymbol{ \tilde{c}}_n, \boldsy
 $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d_H\left( \boldsymbol{ \tilde{c}}_n, \boldsymbol{\hat{c}} \right)}$
 \end{algorithm}

-\caption{Hybrid proximal \& ML decoding algorithm}
+\caption{Improved proximal decoding algorithm}
 \end{figure}
 \end{minipage}
 \end{frame}
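The only algorithm lines visible in this hunk are the final selection step. An illustrative Python sketch of just that step (not part of the commit): the construction of the candidate list c_tilde_n lies outside this hunk and is taken as given, and the function name is hypothetical.

# Illustrative sketch (not part of the commit): among candidate codewords,
# output the one with the lowest Hamming distance d_H to the proximal estimate.
import numpy as np

def select_candidate(candidates, c_hat):
    """candidates: list of 0/1 arrays; c_hat: 0/1 array from the proximal stage."""
    d = [int(np.sum(c != c_hat)) for c in candidates]   # d_H(c_tilde_n, c_hat)
    return candidates[int(np.argmin(d))]                # lowest distance wins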
@@ -1029,8 +1029,8 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \frametitle{Proximal Decoding: Improvement using ``ML-on-List''}

 \begin{itemize}
-\item Comparison of proximal \& hybrid proximal-ML (correction of $N = \SI{12}{\bit}$)
-decoding simulation
+\item Comparison of proximal \& improved (correction of $N = \SI{12}{\bit}$)
+decoding simulation%
 \footnote{(3,6) regular LDPC code with $n=204, k=102$
 \cite[Code: 204.33.484]{mackay_enc}}
 results
@@ -1150,13 +1150,13 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \addlegendentry{proximal, $\gamma = 0.05$}

 \addlegendimage{Emerald, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.15$}
+\addlegendentry{improved, $\gamma = 0.15$}

 \addlegendimage{RoyalPurple, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.01$}
+\addlegendentry{improved, $\gamma = 0.01$}

 \addlegendimage{red, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.05$}
+\addlegendentry{improved, $\gamma = 0.05$}
 \end{axis}
 \end{tikzpicture}

@@ -1433,19 +1433,19 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \addlegendentry{proximal, $\gamma = 0.15$}

 \addlegendimage{Emerald, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.15$}
+\addlegendentry{improved, $\gamma = 0.15$}

 \addlegendimage{NavyBlue, mark=*, solid}
 \addlegendentry{proximal, $\gamma = 0.01$}

 \addlegendimage{RoyalPurple, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.01$}
+\addlegendentry{improved, $\gamma = 0.01$}

 \addlegendimage{RedOrange, mark=*, solid}
 \addlegendentry{proximal, $\gamma = 0.05$}

 \addlegendimage{red, mark=triangle, densely dashed}
-\addlegendentry{hybrid, $\gamma = 0.05$}
+\addlegendentry{improved, $\gamma = 0.05$}
 \end{axis}
 \end{tikzpicture}

@@ -1466,7 +1466,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \begin{axis}[
 grid=both,
 xlabel={Iterations},
-ylabel={Average $\left| \boldsymbol{x}-\boldsymbol{\hat{x}} \right|$},
+ylabel={Average $\lVert \boldsymbol{c}-\boldsymbol{\hat{c}} \rVert$},
 legend pos=outer north east,
 ]
 \addplot [ForestGreen, mark=none, line width=1pt]
@@ -1488,7 +1488,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \end{axis}
 \end{tikzpicture}

-\caption{Average error for $\SI{500000}{}$ decodings,$
+\caption{Average error for $\SI{500000}{}$ decodings, $
 \omega = 0.05, \gamma = 0.05, K=200$\footnotemark}
 \end{figure}

@@ -1496,7 +1496,8 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \cite[Code: 204.33.484]{mackay_enc}}

 \begin{itemize}
-\item For large $k$, the average error asymptotically approaches a minimum, non-zero value
+\item With increasing iterations, the average error asymptotically
+approaches a minimum, non-zero value
 \end{itemize}
 \end{frame}

@@ -1513,6 +1514,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 grid=both,
 xlabel={$n$}, ylabel={time per frame (s)},
 legend style={at={(0.05,0.77)},anchor=south west},
+legend cell align={left},
 ]
 \addplot[RedOrange, only marks, mark=*]
 table [col sep=comma, x=n, y=spf]
@@ -1522,7 +1524,7 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \addplot[RoyalPurple, only marks, mark=triangle*]
 table [col sep=comma, x=n, y=spf]
 {res/hybrid/fps_vs_n.csv};
-\addlegendentry{hybrid prox \& ML ($\SI{12}{\bit}$)}
+\addlegendentry{improved ($\SI{12}{\bit}$)}
 \end{axis}
 \end{tikzpicture}

@@ -1551,13 +1553,12 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
 \item Error coding performance (BER, FER, decoding failures)
 \item Computational performance ($\mathcal{O}\left( n \right) $ time complexity,
 fast implementation possible)
-\item Number of iterations required independant of SNR
-\item Operation during iteration (oscillation of estimate)
+\item Number of iterations independent of SNR
 \end{itemize}
 \item Suggestion for improvement of proximal decoding:
 \begin{itemize}
-\item Addidion of ``ML-on-list'' step
-\item $\sim\SI{1}{dB}$ gain under certain conditions
+\item Addition of ``ML-on-list'' step
+\item Up to $\sim\SI{1}{dB}$ gain under certain conditions
 \end{itemize}
 \end{itemize}
 \end{frame}
@@ -1,4 +1,4 @@
-\section{Forthcoming Examinations}%
+\section{Forthcoming Analysis}%
 \label{sec:Forthcoming Examinations}


@@ -10,7 +10,7 @@
 \frametitle{Forthcoming Examinations}

 \begin{itemize}
-\item Test the (Alternating Direction Method of Multipliers) ADMM
+\item Test ADMM (Alternating Direction Method of Multipliers)
 as an optimization method for LP Decoding
 \begin{itemize}
 \item In LP decoding, the ML decoding problem is reduced to a linear program,
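The bullet above names the linear program without restating it. Assembling the pieces shown earlier in the diff (the LLR cost and the convex combinations of codewords), the program being referred to looks as follows; this is an illustrative sketch (not part of the commit), and in practical LP decoding the hull is further relaxed to keep the constraint count manageable.

% Illustrative sketch: ML decoding as a linear program over the convex hull
% of the codewords.
\begin{align*}
\min_{\boldsymbol{x}} \sum_{i=1}^{n} \gamma_i x_i
\hspace{5mm} \text{s.t.} \hspace{3mm}
\boldsymbol{x} = \sum_{\boldsymbol{c}\in\mathcal{C}} \lambda_{\boldsymbol{c}} \boldsymbol{c},
\hspace{3mm} \lambda_{\boldsymbol{c}} \ge 0,
\hspace{3mm} \sum_{\boldsymbol{c}\in\mathcal{C}} \lambda_{\boldsymbol{c}} = 1
\end{align*}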
@@ -23,8 +23,8 @@
 \end{itemize}
 \item Compare ADMM implementation with Proximal Decoding implementation with respect to
 \begin{itemize}
-\item Decoding performance (BER, FER)
-\item Computational performance (time complexity, actual seconds per frame)
+\item decoding performance (BER, FER)
+\item computational performance (time complexity, actual seconds per frame)
 \end{itemize}
 \end{itemize}

@@ -23,7 +23,7 @@

 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \begin{frame}[t]
-\frametitle{Previous work}
+\frametitle{Previous Work}
 \begin{figure}[h]
 \centering

@@ -47,8 +47,8 @@
 \end{figure}

 \begin{itemize}
-\item Examination of ``Proximal Decoding''
-\item Examination of ``Interior Point Decoding''
+\item Analysis of ``Proximal Decoding''
+\item Analysis of ``Interior Point Decoding''
 \end{itemize}
 \end{frame}

@@ -89,12 +89,13 @@
 \end{figure}

 \begin{itemize}
-\item All simulations are performed with BPSK modulation:
+\item All simulations are performed with BPSK:
 \begin{align*}
 \boldsymbol{x} = \left( -1 \right)^{\boldsymbol{c}},
-\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n
+\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n,
+\hspace{2mm} \boldsymbol{x} \in \mathbb{R}^n
 \end{align*}
-\item The used channel model is AWGN:
+\item The channel model is AWGN:
 \begin{align*}
 \boldsymbol{y} = \boldsymbol{x} + \boldsymbol{z},
 \hspace{5mm}\boldsymbol{z}\sim \mathcal{N}
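To make the system model concrete, here is an illustrative Python sketch (not part of the commit) of the BPSK mapping and AWGN channel stated on the slide. The Eb/N0-to-noise-variance conversion assumes unit-energy BPSK symbols and code rate R = k/n; the function name transmit and the rate argument are choices of this sketch, not spelled out in the diff.

# Illustrative sketch (not part of the commit): BPSK mapping and AWGN channel.
import numpy as np

def transmit(c, ebn0_db, R):
    """c: 0/1 codeword array, R: code rate k/n; returns the channel output y."""
    x = (-1.0) ** c                                 # x = (-1)^c, x in R^n
    ebn0 = 10.0 ** (ebn0_db / 10.0)
    sigma = np.sqrt(1.0 / (2.0 * R * ebn0))         # noise std. dev. (assumption above)
    z = np.random.normal(0.0, sigma, size=x.shape)  # z ~ N(0, sigma^2 I)
    return x + z                                    # y = x + z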