Added final presentation: modified version of midterm

This commit is contained in:
2023-04-12 00:16:06 +02:00
parent cc18c01479
commit 85dc43190f
58 changed files with 15112 additions and 0 deletions

View File

@@ -0,0 +1,5 @@
%\appendix
%
%\section{Proximal Decoding}%
%\label{app:Proximal Decoding}

View File

@@ -0,0 +1,325 @@
\section{Decoding Algorithms}%
\label{sec:Decoding Algorithms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Proximal Decoding}%
\label{sub:Alg Proximal Decoding}
% Slide: derivation of the proximal-decoding objective following
% \cite{proximal_paper}: MAP rule, prior PDF approximated by a Gibbs-type
% density, and the code-constraint polynomial h.
\begin{frame}[t]
\frametitle{Proximal Decoding: General Idea \cite{proximal_paper}}
\vspace*{-0.3cm}
\begin{itemize}
\item MAP rule:
\begin{align*}
\hat{\boldsymbol{x}}
&= \argmax_{\tilde{\boldsymbol{x}}\in\mathbb{R}^n}
f_{\boldsymbol{Y}}\left( \boldsymbol{y} | \tilde{\boldsymbol{x}} \right)
f_{\boldsymbol{X}}\left( \tilde{\boldsymbol{x}} \right)\\
&= \argmax_{\tilde{\boldsymbol{x}}\in\mathbb{R}^n}
e^{-L\left( \boldsymbol{y} | \tilde{\boldsymbol{x}}\right)}
f_{\boldsymbol{X}}\left( \tilde{\boldsymbol{x}} \right),
\hspace{5mm} L\left( \boldsymbol{y} | \tilde{\boldsymbol{x}} \right)
= - \ln\left( f_{\boldsymbol{Y}}
\left( \boldsymbol{y} | \tilde{\boldsymbol{x}} \right) \right)
\end{align*}
% The exact prior is a train of Dirac deltas on the (bipolar) codewords;
% it is smoothed into an exponential with normalization constant Z.
\item Approximation of prior PDF:
\begin{align*}
f_{\boldsymbol{X}}\left( \tilde{\boldsymbol{x}} \right)
= \frac{1}{\left| \mathcal{C} \right| }
\sum_{\boldsymbol{c} \in \mathcal{C} }
\delta\left( \tilde{\boldsymbol{x}} - \left( -1 \right)^{\boldsymbol{c}}
\right)
\approx \frac{1}{Z} e^{-\gamma h\left( \tilde{\boldsymbol{x}} \right) }
\end{align*}
% h penalizes non-bipolar coordinates and violated parity checks; it is
% zero exactly on the bipolar images of the codewords.
\item Code constraint polynomial:
\begin{minipage}[c]{0.56\textwidth}
\raggedright
\begin{align*}
h\left( \tilde{\boldsymbol{x}} \right) =
\underbrace{\sum_{i=1}^{n} \left( \tilde{x}_i^2 - 1 \right)^2}_{\text{Bipolar
constraint}}
+ \underbrace{\sum_{j=1}^{m} \left[ \left(
\prod_{i\in N_v\left( j \right)} \tilde{x}_i\right) -1 \right]^2}
_{\text{Parity constraint}},
\end{align*}
\end{minipage}%
\begin{minipage}[c]{0.4\textwidth}
\raggedleft
\begin{flalign*}
\mathcal{I} &:= \left[1\text{ : }n\right],
\hspace{2mm} \mathcal{J} := \left[1\text{ : }m\right] \\
N_v\left( j \right) &:= \left\{i | i\in \mathcal{I},
\boldsymbol{H}_{j,i} = 1
\right\}, j\in\mathcal{J}
% No trailing \\ before \end{flalign*}: a trailing row terminator in an
% amsmath alignment inserts a spurious empty equation row.
\end{flalign*}
\end{minipage}
\hfill
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Slide: the proximal-decoding objective and the resulting two-step
% iteration (gradient step on the channel term, proximal step on the
% code-constraint term).
\begin{frame}[t]
\frametitle{Proximal Decoding: General Idea}
\begin{itemize}
\item Objective function:
\begin{align*}
g\left( \tilde{\boldsymbol{x}} \right)
= L\left( \boldsymbol{y} | \tilde{\boldsymbol{x}} \right)
+ \gamma h\left( \tilde{\boldsymbol{x}} \right)
\end{align*}
\note{Notational difference between $f$ and $f_X$ or $f_Y$}
\item Proximal operator \cite{proximal_algorithms}:
\begin{align*}
\text{prox}_{\gamma h} \left( \tilde{\boldsymbol{x}} \right) &\equiv
\argmin_{\boldsymbol{t}\in\mathbb{R}^n} \left(
\gamma h\left( \boldsymbol{t} \right) + \frac{1}{2} \lVert \boldsymbol{t}
- \tilde{\boldsymbol{x}} \rVert^2 \right)\\
% First-order (gradient) approximation of the proximal operator,
% valid for small gamma.
&\approx \tilde{\boldsymbol{x}}
- \gamma \nabla h\left( \tilde{\boldsymbol{x}} \right),
\hspace{5mm} \gamma \text{ small}
\end{align*}
% The two updates below alternate until a valid codeword is found or the
% iteration budget is exhausted (see the algorithm slide).
\item Iterative decoding process:
\begin{align*}
\boldsymbol{r} &\leftarrow \boldsymbol{s}
- \omega \nabla L\left( \boldsymbol{y} | \boldsymbol{s}
\right), \hspace{5mm} \omega > 0
\hspace{10mm} \text{``Gradient descent step''}\\
\boldsymbol{s} &\leftarrow \boldsymbol{r}
- \gamma \nabla h\left( \boldsymbol{r}
\right), \hspace{9mm} \gamma > 0
\hspace{10mm} \text{``Code proximal step''}
\end{align*}
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Slide: pseudo code of the proximal decoding iteration.
% Fix: the original listing assigned sign(s) to x-hat but then tested and
% returned c-hat, which was never defined. An explicit bipolar-to-binary
% demapping step (c = (1 - x)/2, matching the BPSK mapping x = (-1)^c used
% on the presumptions slide) now produces c-hat before the parity check.
\begin{frame}[t, fragile]
\frametitle{Proximal Decoding: Algorithm}
\begin{itemize}
\item Iterative decoding algorithm \cite{proximal_paper}:
\end{itemize}
\vspace{2mm}
\begin{algorithm}[caption={}, label={}]
$\boldsymbol{s} \leftarrow \boldsymbol{0}$
for $K$ iterations do
$\boldsymbol{r} \leftarrow \boldsymbol{s} - \omega \nabla L \left( \boldsymbol{y} \mid \boldsymbol{s} \right) $
$\boldsymbol{s} \leftarrow \boldsymbol{r} - \gamma \nabla h\left( \boldsymbol{r} \right) $
$\boldsymbol{\hat{x}} \leftarrow \text{sign}\left( \boldsymbol{s} \right) $
$\boldsymbol{\hat{c}} \leftarrow \frac{1}{2}\left( \boldsymbol{1} - \boldsymbol{\hat{x}} \right) $
if $\boldsymbol{H}\boldsymbol{\hat{c}} = \boldsymbol{0}$ do
return $\boldsymbol{\hat{c}}$
end if
end for
return $\boldsymbol{\hat{c}}$
\end{algorithm}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{LP Decoding}%
\label{sub:LP Decoding}
% Slide: LP decoding after Feldman -- codeword polytope, linear cost
% function, and the exact (unrelaxed) LP formulation of ML decoding,
% illustrated with the n=3 single parity-check code polytope.
\begin{frame}[t]
\frametitle{LP Decoding \cite{feldman_paper}}
\begin{minipage}[c]{0.6\linewidth}
\begin{itemize}
\item Codeword polytope:
\begin{align*}
\text{poly}\left( \mathcal{C} \right) =
\left\{
\sum_{\boldsymbol{c}\in\mathcal{C}}\alpha_{\boldsymbol{c}}
\boldsymbol{c} : \alpha_{\boldsymbol{c}} \ge 0,
\sum_{\boldsymbol{c}\in\mathcal{C}}\alpha_{\boldsymbol{c}} = 1
\right\},
\hspace{5mm} \alpha_{\boldsymbol{c}} \in \mathbb{R}_{\ge 0}
\end{align*}
% gamma_i are the per-bit log-likelihood ratios; the cost of a (possibly
% fractional) vector c-tilde is linear in its coordinates.
% Transpose written as \text{T} to match the LP formulation below.
\item Cost function:
\begin{align*}
\boldsymbol{\gamma}^\text{T} \tilde{\boldsymbol{c}} = \sum_{i=1}^{n}
\gamma_i \tilde{c}_i,
\hspace{5mm}\gamma_i = \ln\left(
\frac{p_{Y_i \mid C_i}\left( y_i | c_i = 0 \right) }
{p_{Y_i \mid C_i}\left(y_i | c_i=1 \right) } \right)
\end{align*}
\item Exact LP formulation of ML decoding:
\begin{align*}
&\text{minimize } \boldsymbol{\gamma}^\text{T} \tilde{\boldsymbol{c}}\\
&\text{subject to } \tilde{\boldsymbol{c}}\in\text{poly}
\left( \mathcal{C} \right)
\end{align*}
\end{itemize}
\end{minipage}%
\hfill%
\begin{minipage}[c]{0.4\linewidth}
\begin{figure}[H]
\centering
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{245}
\begin{tikzpicture}[scale=1, transform shape, tdplot_main_coords]
% Unit cube (scaled by 2), dashed edges are hidden ones
\draw[dashed] (0, 0, 0) -- (2, 0, 0);
\draw[dashed] (2, 0, 0) -- (2, 0, 2);
\draw[] (2, 0, 2) -- (0, 0, 2);
\draw[] (0, 0, 2) -- (0, 0, 0);
\draw[] (0, 2, 0) -- (2, 2, 0);
\draw[] (2, 2, 0) -- (2, 2, 2);
\draw[] (2, 2, 2) -- (0, 2, 2);
\draw[] (0, 2, 2) -- (0, 2, 0);
\draw[] (0, 0, 0) -- (0, 2, 0);
\draw[dashed] (2, 0, 0) -- (2, 2, 0);
\draw[] (2, 0, 2) -- (2, 2, 2);
\draw[] (0, 0, 2) -- (0, 2, 2);
% Codeword polytope: tetrahedron spanned by the four even-weight words
\draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 0, 2);
\draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 2, 0);
\draw[line width=1pt, color=KITblue] (0, 0, 0) -- (0, 2, 2);
\draw[line width=1pt, color=KITblue] (2, 0, 2) -- (2, 2, 0);
\draw[line width=1pt, color=KITblue] (2, 0, 2) -- (0, 2, 2);
\draw[line width=1pt, color=KITblue] (0, 2, 2) -- (2, 2, 0);
% Polytope Annotations
\node[codeword] (c000) at (0, 0, 0) {};% {$\left( 0, 0, 0 \right) $};
\node[codeword] (c101) at (2, 0, 2) {};% {$\left( 1, 0, 1 \right) $};
\node[codeword] (c110) at (2, 2, 0) {};% {$\left( 1, 1, 0 \right) $};
\node[codeword] (c011) at (0, 2, 2) {};% {$\left( 0, 1, 1 \right) $};
\node[color=KITblue, right=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c101] {$\left( 1, 0, 1 \right) $};
\node[color=KITblue, left=0cm of c110] {$\left( 1, 1, 0 \right) $};
\node[color=KITblue, left=0cm of c011] {$\left( 0, 1, 1 \right) $};
% Example fractional point c-tilde inside the polytope
\node[color=KITgreen, fill=KITgreen,
draw, circle, inner sep=0pt, minimum size=4pt] (f) at (0.7, 0.7, 1) {};
\node[color=KITgreen, right=0cm of f] {$\tilde{\boldsymbol{c}}$};
\end{tikzpicture}
\caption{$\text{poly}\left( \mathcal{C} \right)$ for single parity-check code with
$n=3$}
\end{figure}
\end{minipage}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[t]
% \frametitle{LP Relaxation}
%
% \begin{minipage}[c]{0.6\linewidth}
% \begin{itemize}
% \item Set of all variable nodes incident to a check node:
% \begin{align*}
% N\left( j \right) \equiv \left\{
% i | i\in \mathcal{I},
% \boldsymbol{H}_{j,i} = 1
% \right\},
% j \in \mathcal{J}
% \end{align*}
% \begin{align*}
% S \subseteq N\left( j \right), \left| S \right| \text{odd}
% \end{align*}
% \item Relaxed polytope representation:
% \begin{align*}
% \sum_{i\in \left( N\left( j \right) \setminus S\right) } f_i
% + \sum_{i\in S} \left( 1 - f_i \right) \ge 1
% \end{align*}
% ``$\boldsymbol{f}$ is separated by at least one bitflip
% from all illegal configurations''
% \end{itemize}
% \end{minipage}%
% \hfill%
% \begin{minipage}[c]{0.4\linewidth}
% \begin{figure}[H]
% \centering
%
% \tikzstyle{codeword} = [color=KITblue, fill=KITblue,
% draw, circle, inner sep=0pt, minimum size=4pt]
%
% \tdplotsetmaincoords{60}{245}
% \begin{tikzpicture}[scale=1, transform shape, tdplot_main_coords]
% % Cube
%
% \draw[dashed] (0, 0, 0) -- (2, 0, 0);
% \draw[dashed] (2, 0, 0) -- (2, 0, 2);
% \draw[] (2, 0, 2) -- (0, 0, 2);
% \draw[] (0, 0, 2) -- (0, 0, 0);
%
% \draw[] (0, 2, 0) -- (2, 2, 0);
% \draw[] (2, 2, 0) -- (2, 2, 2);
% \draw[] (2, 2, 2) -- (0, 2, 2);
% \draw[] (0, 2, 2) -- (0, 2, 0);
%
% \draw[] (0, 0, 0) -- (0, 2, 0);
% \draw[dashed] (2, 0, 0) -- (2, 2, 0);
% \draw[] (2, 0, 2) -- (2, 2, 2);
% \draw[] (0, 0, 2) -- (0, 2, 2);
%
% % Codeword Polytope
%
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 0, 2);
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 2, 0);
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (0, 2, 2);
%
% \draw[line width=1pt, color=KITblue] (2, 0, 2) -- (2, 2, 0);
% \draw[line width=1pt, color=KITblue] (2, 0, 2) -- (0, 2, 2);
%
% \draw[line width=1pt, color=KITblue] (0, 2, 2) -- (2, 2, 0);
%
% % Polytope Annotations
%
% \node[codeword, color=KITred] (c111) at (2, 2, 2) {};% {$\left( 0, 0, 0 \right) $};
% \node[codeword, color=KITred] (c001) at (0, 0, 2) {};% {$\left( 1, 0, 1 \right) $};
% \node[codeword, color=KITred] (c100) at (2, 0, 0) {};% {$\left( 1, 1, 0 \right) $};
% \node[codeword, color=KITred] (c010) at (0, 2, 0) {};% {$\left( 0, 1, 1 \right) $};
%
% \node[color=KITred, left=0cm of c111] {$\left( 1, 1, 1 \right) $};
% \node[color=KITred, right=0cm of c001] {$\left( 0, 0, 1 \right) $};
% \node[color=KITred, right=0.35cm of c100] {$\left( 1, 0, 0 \right) $};
% \node[color=KITred, below=0cm of c010] {$\left( 0, 1, 0 \right) $};
% \end{tikzpicture}
% \caption{Relaxed polytope for $n=3$}
% \end{figure}
% \end{minipage}
% \todo{How is this a relaxation and not just an alternative formulation?
% We have just switched out valid codewords for invalid ones}
% \todo{Is LP Relaxation relevant as theoretical background?}
%\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\subsection{ADMM}%
%\label{sub:Alg ADMM}
%
%\begin{frame}[t]
% \frametitle{ADMM Decoding: General Idea}
%
% \todo{TODO}
%\end{frame}
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\begin{frame}[t]
% \frametitle{ADMM Decoding: Algorithm}
%
% \todo{TODO}
%\end{frame}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,32 @@
% Slide: outlook -- planned ADMM-based LP decoding experiments and the
% comparison criteria against the proximal decoder.
\section{Forthcoming Analysis}%
% NOTE(review): label wording ("Forthcoming Examinations") differs from the
% section title ("Forthcoming Analysis") -- kept as-is since other files may
% reference this label; confirm before renaming.
\label{sec:Forthcoming Examinations}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{LP Decoding}%
\label{sub:Forth Exam LP Decoding}
\begin{frame}[t]
\frametitle{Forthcoming Analysis}
\begin{itemize}
\item Test ADMM (Alternating Direction Method of Multipliers)
as an optimization method for LP decoding
\begin{itemize}
\item In LP decoding, the ML decoding problem is reduced to a linear program,
which can be solved in polynomial time \cite{lautern}
\item ADMM is intended to blend the decomposability
of dual ascent with the superior convergence properties of the method
of multipliers \cite{distr_opt_book}
\item ADMM has been proposed for efficient LP decoding
\cite{efficient_lp_dec_admm}
\end{itemize}
\item Compare ADMM implementation with proximal decoding implementation with respect to
\begin{itemize}
\item decoding performance (BER, FER)
\item computational performance (time complexity, actual seconds per frame)
\end{itemize}
\end{itemize}
\end{frame}

View File

@@ -0,0 +1,20 @@
% Closing slide: thank-you message with a large decorative question mark
% (single tikz node scaled 14x) in the right-hand minipage.
\begin{frame}[t]
\frametitle{Questions}
\begin{minipage}[c]{0.65\textwidth}
\centering
\LARGE Thank you for your attention!\\ Any questions?
\end{minipage}%
\begin{minipage}[c]{0.35\textwidth}
\centering
\begin{figure}[H]
\centering
\begin{tikzpicture}[every node/.style={scale=14}]
\node at (0, 0) {\textcolor{KITblue}{?}};
\end{tikzpicture}
\end{figure}
\end{minipage}
\end{frame}

View File

@@ -0,0 +1,191 @@
\section{Theoretical Background}%
\label{sec:Theoretical Background}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Motivation}%
\label{sub:Motivation}
% Slide: motivation for optimization-based decoding. Each bullet restates a
% claim from the cited source (square brackets mark editorial insertions).
% Fix: "messagepassing" -> "message-passing", matching the hyphenation used
% in the third bullet.
\begin{frame}[t]
\frametitle{Motivation}
\begin{itemize}
\item The general [ML] decoding problem for linear codes and the general problem
of finding the weights of a linear code are both NP-complete. \cite{ml_np_hard_proof}
\item The iterative message-passing algorithms preferred in practice do not guarantee
optimality and may fail to decode correctly when the graph contains cycles.
\cite{ldpc_conv}
\item The standard message-passing algorithms used for decoding [LDPC and turbo codes]
are often difficult to analyze. \cite{feldman_thesis}
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Slide: previous work -- three framed page thumbnails (pages 1, 25, 60) of
% the predecessor Bachelor's thesis, rendered via \includegraphics[page=..].
\begin{frame}[t]
\frametitle{Previous Work}
\begin{figure}[h]
\centering
\begin{subfigure}{0.33\textwidth}
\centering
\fbox{\includegraphics[page=1,width=.6\textwidth]{res/Bachelor_Thesis_Yanxia_Lu}}
\end{subfigure}%
\begin{subfigure}{0.33\textwidth}
\centering
\fbox{\includegraphics[page=25,width=.6\textwidth]{res/Bachelor_Thesis_Yanxia_Lu}}
\end{subfigure}%
\begin{subfigure}{0.33\textwidth}
\centering
\fbox{\includegraphics[page=60,width=.6\textwidth]{res/Bachelor_Thesis_Yanxia_Lu}}
\end{subfigure}%
\caption{Bachelor's Thesis by Yanxia Lu \cite{yanxia_lu_thesis}}
\end{figure}
\begin{itemize}
\item Analysis of ``Proximal Decoding''
\item Analysis of ``Interior Point Decoding''
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Presumptions}%
\label{sub:Presumptions}
% Slide: system model assumed in all simulations -- BPSK mapping, AWGN
% channel, all-zeros codeword assumption. The tikz block diagram shows
% mapper -> AWGN -> decoder -> demapper with the signal labels c, x, y,
% x-hat, c-hat used throughout the rest of the talk.
\begin{frame}[t]
\frametitle{Presumptions: Channel \& Modulation}
\tikzstyle{mapper} = [rectangle, minimum width=1.5cm, minimum height=0.7cm,
rounded corners=0.1cm, text centered, draw=black, fill=KITgreen!80]
\begin{figure}[htpb]
\centering
\begin{tikzpicture}[scale=1, transform shape]
\node (in) {$\boldsymbol{c}$};
\node[mapper, right=0.5cm of in] (bpskmap) {Mapper};
\node[right=1.5cm of bpskmap,
draw, circle, inner sep=0pt, minimum size=0.5cm] (add) {$+$};
\node[below=0.5cm of add] (noise) {$\boldsymbol{n}$};
\node[mapper, right=1.5cm of add] (decoder) {Decoder};
\node[mapper, right=1.5cm of decoder] (demapper) {Demapper};
\node[right=0.5cm of demapper] (out) {$\boldsymbol{\hat{c}}$};
% Signal labels placed midway above each connecting arrow
\node at ($(bpskmap.east)!0.5!(add.west) + (0,0.3cm)$) {$\boldsymbol{x}$};
\node at ($(add.east)!0.5!(decoder.west) + (0,0.3cm)$) {$\boldsymbol{y}$};
\node at ($(decoder.east)!0.5!(demapper.west) + (0,0.3cm)$) {$\boldsymbol{\hat{x}}$};
\draw[->] (in) -- (bpskmap);
\draw[->] (bpskmap) -- (add);
\draw[->] (add) -- (decoder);
\draw[->] (noise) -- (add);
\draw[->] (decoder) -- (demapper);
\draw[->] (demapper) -- (out);
\end{tikzpicture}
\end{figure}
\begin{itemize}
\item All simulations are performed with BPSK:
\begin{align*}
\boldsymbol{x} = \left( -1 \right)^{\boldsymbol{c}},
\hspace{5mm} \boldsymbol{c} \in \mathbb{F}_2^n,
\hspace{2mm} \boldsymbol{x} \in \mathbb{R}^n
\end{align*}
% Noise variance parameterized by code rate k/n and Eb/N0.
\item The channel model is AWGN:
\begin{align*}
\boldsymbol{y} = \boldsymbol{x} + \boldsymbol{n},
\hspace{5mm}\boldsymbol{n}\sim \mathcal{N}
\left(0,\frac{1}{2}\left(\frac{k}{n}\frac{E_b}{N_0}\right)^{-1}\right),
\hspace{2mm} \boldsymbol{y}, \boldsymbol{n} \in \mathbb{R}^n
\end{align*}
\item All-zeros assumption:
\begin{align*}
\boldsymbol{c} = \boldsymbol{0}
\end{align*}
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Optimization as a Decoding Method}%
\label{sub:Optimization as a Decoding Method}
% Slide: high-level idea -- recast decoding as constrained optimization.
% Right-hand figure: the n=3 hypercube with the valid codewords of the
% single parity-check code marked; the polytope edges are commented out
% here (this slide shows only the vertices, the LP slide shows the hull).
\begin{frame}[t]
\frametitle{Optimization as a Decoding Method}
\begin{minipage}[c]{0.6\linewidth}
\begin{itemize}
\item Reformulate decoding problem as optimization problem
\begin{itemize}
\item Establish objective function
\item Establish constraints
\end{itemize}
\item Use optimization method to solve the new problem
\end{itemize}
\end{minipage}%
\hfill%
\begin{minipage}[c]{0.4\linewidth}
\begin{figure}[H]
\centering
\tikzstyle{codeword} = [color=KITblue, fill=KITblue,
draw, circle, inner sep=0pt, minimum size=4pt]
\tdplotsetmaincoords{60}{245}
\begin{tikzpicture}[scale=1, transform shape, tdplot_main_coords]
% Cube
\draw[dashed] (0, 0, 0) -- (2, 0, 0);
\draw[dashed] (2, 0, 0) -- (2, 0, 2);
\draw[] (2, 0, 2) -- (0, 0, 2);
\draw[] (0, 0, 2) -- (0, 0, 0);
\draw[] (0, 2, 0) -- (2, 2, 0);
\draw[] (2, 2, 0) -- (2, 2, 2);
\draw[] (2, 2, 2) -- (0, 2, 2);
\draw[] (0, 2, 2) -- (0, 2, 0);
\draw[] (0, 0, 0) -- (0, 2, 0);
\draw[dashed] (2, 0, 0) -- (2, 2, 0);
\draw[] (2, 0, 2) -- (2, 2, 2);
\draw[] (0, 0, 2) -- (0, 2, 2);
% Codeword Polytope
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 0, 2);
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (2, 2, 0);
% \draw[line width=1pt, color=KITblue] (0, 0, 0) -- (0, 2, 2);
% \draw[line width=1pt, color=KITblue] (2, 0, 2) -- (2, 2, 0);
% \draw[line width=1pt, color=KITblue] (2, 0, 2) -- (0, 2, 2);
% \draw[line width=1pt, color=KITblue] (0, 2, 2) -- (2, 2, 0);
% Polytope Annotations
\node[codeword] (c000) at (0, 0, 0) {};% {$\left( 0, 0, 0 \right) $};
\node[codeword] (c101) at (2, 0, 2) {};% {$\left( 1, 0, 1 \right) $};
\node[codeword] (c110) at (2, 2, 0) {};% {$\left( 1, 1, 0 \right) $};
\node[codeword] (c011) at (0, 2, 2) {};% {$\left( 0, 1, 1 \right) $};
\node[color=KITblue, right=0cm of c000] {$\left( 0, 0, 0 \right) $};
\node[color=KITblue, above=0cm of c101] {$\left( 1, 0, 1 \right) $};
\node[color=KITblue, left=0cm of c110] {$\left( 1, 1, 0 \right) $};
\node[color=KITblue, left=0cm of c011] {$\left( 0, 1, 1 \right) $};
% f
\node[color=KITgreen, fill=KITgreen,
draw, circle, inner sep=0pt, minimum size=4pt] (f) at (0.7, 0.7, 1) {};
\node[color=KITgreen, right=0cm of f] {$\tilde{\boldsymbol{c}}$};
\end{tikzpicture}
\caption{Hypercube ($n=3$) with valid codewords}
\end{figure}
\end{minipage}
\end{frame}