Added conclusion slide; Fixed Boyd book citation

Andreas Tsouchlos 2023-01-26 15:33:16 +01:00
parent 788364f12d
commit 56cb023318
3 changed files with 23 additions and 74 deletions

View File

@@ -77,9 +77,9 @@
institution = {KIT},
}
-@BOOK{distr_opt_book,
+@book{distr_opt_book,
author = {Boyd, Stephen and Parikh, Neal and Chu, Eric and Peleato, Borja and Eckstein, Jonathan},
-booktitle = {Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers},
+title = {Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers},
year = {2011},
volume = {},
number = {},

View File

@@ -1504,74 +1504,23 @@ $\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{c}}_n\text{ with lowest }d
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[t, fragile]
% \frametitle{Proximal Decoding: Improvement using ``ML-on-List''}
% \setcounter{footnote}{0}
%
% \begin{itemize}
% \item Improvement of proximal decoding by adding an ``ML-on-list'' step after iterating
% \end{itemize}
%
% \begin{minipage}[t]{.48\textwidth}
% \centering
%
% \begin{figure}
% \centering
%
% \begin{algorithm}[caption={}, label={},
% basicstyle=\fontsize{7.5}{9.5}\selectfont
% ]
%$\boldsymbol{s}^{\left( 0 \right)} = \boldsymbol{0}$
%for $k=0$ to $K-1$ do
% $\boldsymbol{r}^{\left( k+1 \right)} = \boldsymbol{s}^{(k)} - \omega \nabla L \left( \boldsymbol{s}^{(k)}; \boldsymbol{y} \right) $
% Compute $\nabla h\left( \boldsymbol{r}^{\left( k+1 \right) } \right)$
% $\boldsymbol{s}^{\left( k+1 \right)} = \boldsymbol{r}^{(k+1)} - \gamma \nabla h\left( \boldsymbol{r}^{\left( k+1 \right) } \right) $
% $\boldsymbol{\hat{x}} = \text{sign}\left( \boldsymbol{s}^{\left( k+1 \right) } \right) $
% If $\boldsymbol{\hat{x}}$ passes the parity check condition, break the loop.
%end for
%Output $\boldsymbol{\hat{x}}$
% \end{algorithm}
%
% \caption{Proximal decoding algorithm \cite{proximal_paper}}
% \end{figure}
%
% \end{minipage}%
% \hfill\begin{minipage}[t]{.48\textwidth}
% \centering
% \begin{figure}
% \centering
%
% \begin{algorithm}[caption={}, label={},
% basicstyle=\fontsize{7.5}{9.5}\selectfont
% ]
%$\boldsymbol{s}^{\left( 0 \right)} = \boldsymbol{0}$
%for $k=0$ to $K-1$ do
% $\boldsymbol{r}^{\left( k+1 \right)} = \boldsymbol{s}^{(k)} - \omega \nabla L \left( \boldsymbol{s}^{(k)}; \boldsymbol{y} \right) $
% Compute $\nabla h\left( \boldsymbol{r}^{\left( k+1 \right) } \right)$
% $\boldsymbol{s}^{\left( k+1 \right)} = \boldsymbol{r}^{(k+1)} - \gamma \nabla h\left( \boldsymbol{r}^{\left( k+1 \right) } \right) $
% $\boldsymbol{\hat{x}} = \text{sign}\left( \boldsymbol{s}^{\left( k+1 \right) } \right) $
% $\textcolor{KITblue}{\text{If }\boldsymbol{\hat{x}}\text{ passes the parity check condition, output }\boldsymbol{\hat{x}}}$
%end for
%$\textcolor{KITblue}{\text{Find }N\text{ most probably wrong bits.}}$
%$\textcolor{KITblue}{\text{Generate variations } \boldsymbol{\tilde{x}}_n\text{ of }\boldsymbol{\hat{x}}\text{ with the }N\text{ bits modified.}}$
%$\textcolor{KITblue}{\text{Compute }\langle \boldsymbol{ \tilde{x}}_n, \boldsymbol{\hat{x}} \rangle \forall n \in \left[ 1 : N-1 \right]}$
%$\textcolor{KITblue}{\text{Output }\boldsymbol{\tilde{x}}_n\text{ with lowest }\langle \boldsymbol{ \tilde{x}}_n, \boldsymbol{\hat{x}} \rangle}$
% \end{algorithm}
%
% \caption{Hybrid proximal \& ML decoding algorithm}
% \end{figure}
% \end{minipage}
%\end{frame}
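
The two listings removed above are pseudocode only. As a rough illustration, here is a minimal NumPy sketch of the plain proximal decoding loop (left listing). The function name proximal_decode, the step sizes omega/gamma, the BPSK sign-to-bit mapping, and the parity check via a matrix H are assumptions of this sketch, and grad_L/grad_h are placeholders for the problem-specific gradients of the likelihood term L(s; y) and the code-constraint penalty h from \cite{proximal_paper}:

    import numpy as np

    def proximal_decode(y, H, grad_L, grad_h, omega=0.05, gamma=0.05, K=100):
        # Alternate a gradient step on the likelihood term with a gradient
        # step on the penalty term, as in the pseudocode above; stop early
        # once the hard decision satisfies all parity checks.
        s = np.zeros_like(y)
        x_hat = np.sign(y)                          # fallback before first pass
        for _ in range(K):
            r = s - omega * grad_L(s, y)            # step on L(s; y)
            s = r - gamma * grad_h(r)               # step on the penalty h
            x_hat = np.sign(s)                      # hard decision in {-1, +1}
            c_hat = ((1 - x_hat) // 2).astype(int)  # BPSK: +1 -> bit 0, -1 -> bit 1
            if not ((H @ c_hat) % 2).any():         # all parity checks satisfied
                break
        return x_hat

Each iteration is a fixed number of length-n vector operations, which is where the $\mathcal{O}(n)$ per-iteration cost summarized on the conclusion slide below comes from.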
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\subsection{ADMM: Examination Results}%
%\label{sub:Ex ADMM}
%
%\begin{frame}[t]
% \frametitle{ADMM}
%
% \todo{TODO}
%\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]
\frametitle{Conclusion}
\begin{itemize}
\item Analysis of proximal decoding for AWGN channels:
\begin{itemize}
\item Error-correction performance (BER, FER, decoding failures)
\item Computational performance ($\mathcal{O}\left( n \right) $ time complexity,
fast implementation possible)
\item Number of required iterations independent of SNR
\item Behavior during the iterations (oscillation of the estimate)
\end{itemize}
\item Suggestion for improvement of proximal decoding:
\begin{itemize}
\item Addition of ``ML-on-list'' step (sketched below)
\item $\sim\SI{1}{dB}$ gain under certain conditions
\end{itemize}
\end{itemize}
\end{frame}
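
The ``ML-on-list'' step referenced on this slide is only partially specified in the removed listing above (it ranks candidates by $\langle \boldsymbol{\tilde{x}}_n, \boldsymbol{\hat{x}} \rangle$ and leaves the reliability criterion open). One possible reading, sketched here under explicit assumptions: take $|s_i|$ as the bit reliability, enumerate all sign patterns on the $N$ least reliable positions, and keep the valid candidate closest to the channel output $\boldsymbol{y}$, which is the ML choice on an AWGN channel. The name ml_on_list and each of these choices are assumptions of the sketch, not part of the commit:

    import itertools
    import numpy as np

    def ml_on_list(s, x_hat, y, H, N=4):
        # Assumed reliability measure: small |s_i| = probably wrong bit.
        weak = np.argsort(np.abs(s))[:N]
        best, best_metric = x_hat, np.inf            # fall back to x_hat
        for flips in itertools.product([1.0, -1.0], repeat=N):
            cand = x_hat.copy()
            cand[weak] = cand[weak] * np.array(flips)
            c = ((1 - cand) // 2).astype(int)        # BPSK: +1 -> 0, -1 -> 1
            if ((H @ c) % 2).any():                  # discard invalid codewords
                continue
            metric = np.sum((y - cand) ** 2)         # assumed ML metric (AWGN)
            if metric < best_metric:
                best, best_metric = cand, metric
        return best

With small N the extra cost is a constant number of parity checks and distance computations, and it is only incurred when the iterative stage fails to produce a valid codeword.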

View File

@@ -16,7 +16,7 @@
\item ADMM is intended to blend the decomposability
of dual ascent with the superior convergence properties of the method
of multipliers \cite{distr_opt_book}
-\item Recently, ADMM has been proposed for efficient LP Decoding
+\item ADMM has been proposed for efficient LP Decoding
\cite{efficient_lp_dec_admm}
\end{itemize}
\item Compare ADMM implementation with Proximal Decoding implementation with respect to
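
For context, a minimal sketch of the generic scaled-form ADMM iteration from \cite{distr_opt_book} for a splitting $\min f(x) + g(z)$ subject to $x = z$. Here prox_f/prox_g, rho, and the iteration count are placeholders for the problem-specific pieces; in LP decoding \cite{efficient_lp_dec_admm}, roughly speaking, the z-update decomposes into independent projections for the individual parity checks:

    import numpy as np

    def admm(prox_f, prox_g, n, rho=1.0, iters=200):
        # Scaled-form ADMM for  min f(x) + g(z)  s.t.  x = z.
        # prox(v, rho) = argmin_w  {f or g}(w) + (rho/2)||w - v||^2,
        # i.e. the two subproblem solvers are supplied by the caller.
        x = np.zeros(n)
        z = np.zeros(n)
        u = np.zeros(n)                 # scaled dual variable
        for _ in range(iters):
            x = prox_f(z - u, rho)      # x-update
            z = prox_g(x + u, rho)      # z-update
            u = u + x - z               # dual update on the consensus constraint
        return x

The two subproblems can be solved independently (and in parallel across checks), which is the ``decomposability of dual ascent'' mentioned above, while the quadratic penalty supplies the convergence behavior of the method of multipliers.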