Added supplementary slides for choice of gamma, mu and rho

Andreas Tsouchlos 2023-04-19 00:30:42 +02:00
parent 58ae265fd3
commit 8f3a74ae63
3 changed files with 27 additions and 15 deletions

View File

@@ -76,7 +76,7 @@
@mastersthesis{yanxia_lu_thesis,
author = {Lu, Yanxia},
title = {Realization of Channel Decoding Using Optimization Techniques},
-year = {2023},
+year = {2022},
type = {Bachelor's Thesis},
institution = {KIT},
}

View File

@@ -87,7 +87,7 @@ return $\boldsymbol{s}$
+ \underbrace{\sum\nolimits_{j\in\mathcal{J}} g_j\left( \boldsymbol{T}_j\tilde{\boldsymbol{c}} \right) }
_{\text{Constraints}} \\
\text{subject to}\hspace{5mm} &
-\tilde{\boldsymbol{c}} \in \mathbb{R}^n
+\tilde{\boldsymbol{c}} \in \left[ 0, 1 \right]^n
\end{align*}
\begin{genericAlgorithm}[caption={}, label={},

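To make the relaxed formulation concrete: a minimal NumPy sketch, in which the linear fidelity term and the generic penalty g are illustrative stand-ins rather than the thesis's actual choices.

import numpy as np

def relaxed_objective(c_tilde, llr, T_list, g):
    # Illustrative fidelity term: correlation with the channel LLRs.
    fidelity = llr @ c_tilde
    # One penalty term g(T_j @ c_tilde) per parity check j.
    penalties = sum(g(T @ c_tilde) for T in T_list)
    return fidelity + penalties

def project_box(c_tilde):
    # Projection onto the relaxed feasible set [0, 1]^n
    # is element-wise clipping.
    return np.clip(c_tilde, 0.0, 1.0)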
View File

@@ -21,7 +21,7 @@ For example:%
%
\begin{align*}
x \in \left\{ -1, 1 \right\} &\to \tilde{x} \in \mathbb{R}\\
-c \in \mathbb{F}_2 &\to \tilde{c} \in \left[ 0, 1 \right]
+c \in \mathbb{F}_2 &\to \tilde{c} \in \left[ 0, 1 \right] \subseteq \mathbb{R}
.\end{align*}
%
Additionally, a shorthand notation will be used to denote series of indices and series
@@ -29,9 +29,10 @@ of indexed variables:%
%
\begin{align*}
\left[ m:n \right] &:= \left\{ m, m+1, \ldots, n-1, n \right\},
-\hspace{5mm} m,n\in\mathbb{Z}\\
+\hspace{5mm} m < n, \hspace{2mm} m,n\in\mathbb{Z}\\
x_{\left[ m:n \right] } &:= \left\{ x_m, x_{m+1}, \ldots, x_{n-1}, x_n \right\}
.\end{align*}
+\todo{Not really slicing. How should it be denoted?}
%
In order to designate element-wise operations, in particular the \textit{Hadamard product}
and the \textit{Hadamard power}, the operator $\circ$ will be used:%
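A small NumPy illustration of both notations (note that the thesis's $\left[ m:n \right]$ is inclusive of $n$, unlike Python slicing):

import numpy as np

x = np.array([0.5, 2.0, 3.0, 4.0])
y = np.array([2.0, 0.5, 1.0, 0.25])

print(x * y)   # Hadamard product x ∘ y -> [1. 1. 3. 1.]
print(x ** 2)  # Hadamard power x^{∘2} -> [0.25 4. 9. 16.]

# The index shorthand x_{[1:3]} = {x_1, x_2, x_3} includes the endpoint,
# so it corresponds to Python's x[1:4] (0-indexed, end-exclusive).
print(x[1:4])  # [2. 3. 4.]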
@@ -50,16 +51,17 @@
\section{Preliminaries: Channel Model and Modulation}
\label{sec:theo:Preliminaries: Channel Model and Modulation}
-In order to transmit a bit-word $\boldsymbol{c}$ of length $n$ over a channel,
-it has to be mapped onto a symbol $\boldsymbol{x}$ that can be physically
-transmitted.
+In order to transmit a bit-word $\boldsymbol{c} \in \mathbb{F}_2^n$ of length
+$n$ over a channel, it has to be mapped onto a symbol
+$\boldsymbol{x} \in \mathbb{R}^n$ that can be physically transmitted.
This is known as modulation. The modulation scheme chosen here is \ac{BPSK}:%
%
\begin{align*}
-\boldsymbol{x} = \left( -1 \right)^{\boldsymbol{c}}
+\boldsymbol{x} = 1 - 2\boldsymbol{c}
.\end{align*}
%
-The symbol that reaches the receiver, $\boldsymbol{y}$, is distorted by the channel.
+The transmitted symbol is distorted by the channel; the received symbol is
+denoted by $\boldsymbol{y} \in \mathbb{R}^n$.
This distortion is described by the channel model, which in the context of
this thesis is chosen to be \ac{AWGN}:%
%
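A minimal NumPy sketch of the modulation and channel, assuming the usual \ac{AWGN} form $\boldsymbol{y} = \boldsymbol{x} + \boldsymbol{n}$ with $\boldsymbol{n} \sim \mathcal{N}(\boldsymbol{0}, \sigma^2\boldsymbol{I})$ and an arbitrary illustrative $\sigma$:

import numpy as np

rng = np.random.default_rng(0)
c = rng.integers(0, 2, size=8)          # bit-word c in F_2^n, n = 8
x = 1 - 2 * c                           # BPSK: 0 -> +1, 1 -> -1
sigma = 0.8                             # illustrative noise level
y = x + sigma * rng.standard_normal(8)  # AWGN: y = x + noise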
@@ -87,7 +89,7 @@ of the channel \cite[Sec. II.B.]{mackay_rediscovery} while having a structure
that allows for very efficient decoding.
The lengths of the data words and codewords are denoted by $k\in\mathbb{N}$
-and $n\in\mathbb{N}$, respectively.
+and $n\in\mathbb{N}$, respectively, with $k \le n$.
The set of codewords $\mathcal{C} \subset \mathbb{F}_2^n$ of a binary
linear code can be represented using the \textit{parity-check matrix}
$\boldsymbol{H} \in \mathbb{F}_2^{m\times n}$, where $m$ represents
@@ -103,14 +105,14 @@ $\boldsymbol{c} \in \mathbb{F}_2^n$ using the \textit{generator matrix}
$\boldsymbol{G} \in \mathbb{F}_2^{k\times n}$:%
%
\begin{align*}
-\boldsymbol{c} = \boldsymbol{u}^\text{T}\boldsymbol{G}
+\boldsymbol{c} = \boldsymbol{u}\boldsymbol{G}
.\end{align*}
%
After obtaining a codeword from a data word, it is transmitted over a channel
as described in section \ref{sec:theo:Preliminaries: Channel Model and Modulation}.
The received signal $\boldsymbol{y}$ is then decoded to obtain
-an estimate of the transmitted codeword, $\hat{\boldsymbol{c}}$.
+an estimate of the transmitted codeword, denoted as $\hat{\boldsymbol{c}}$.
Finally, the encoding procedure is reversed and an estimate of the originally
sent data word, $\hat{\boldsymbol{u}}$, is produced.
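As a concrete illustration of encoding and the parity-check relation, a NumPy sketch using the standard systematic $(7,4)$ Hamming code (an example code, not one from the thesis):

import numpy as np

# Systematic (7,4) Hamming code, used purely for illustration.
G = np.array([[1, 0, 0, 0, 1, 1, 0],
              [0, 1, 0, 0, 1, 0, 1],
              [0, 0, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1]])
H = np.array([[1, 1, 0, 1, 1, 0, 0],
              [1, 0, 1, 1, 0, 1, 0],
              [0, 1, 1, 1, 0, 0, 1]])

u = np.array([1, 0, 1, 1])       # data word, k = 4
c = (u @ G) % 2                  # encoding: c = uG over F_2
assert np.all((H @ c) % 2 == 0)  # codewords satisfy H c^T = 0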
The methods examined in this work are all based on \textit{soft-decision} decoding,
@@ -170,7 +172,17 @@ criterion:%
\right)
.\end{align*}%
%
+The \ac{MAP}- and \ac{ML}-criteria are closely connected through
+\textit{Bayes' theorem}:%
+%
+\begin{align*}
+\argmax_{\boldsymbol{c}\in\mathcal{C}} p_{\boldsymbol{C} \mid \boldsymbol{Y}}
+\left( \boldsymbol{c} \mid \boldsymbol{y} \right)
+= \argmax_{\boldsymbol{c}\in\mathcal{C}}
+\frac{p_{\boldsymbol{Y} \mid \boldsymbol{C}}
+\left( \boldsymbol{y} \mid \boldsymbol{c} \right)
+P_{\boldsymbol{C}}\left( \boldsymbol{c} \right)}
+{p_{\boldsymbol{Y}}\left( \boldsymbol{y} \right)}
+= \argmax_{\boldsymbol{c}\in\mathcal{C}} p_{\boldsymbol{Y} \mid \boldsymbol{C}}
+\left( \boldsymbol{y} \mid \boldsymbol{c} \right)
+P_{\boldsymbol{C}}\left( \boldsymbol{c} \right)
+.\end{align*}
+%
+This has the consequence that if the prior probability
+$P_{\boldsymbol{C}}\left( \boldsymbol{c} \right)$ is uniform over
+$\mathcal{C}$, the two criteria are equivalent.
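Under \ac{BPSK} and \ac{AWGN}, the likelihood satisfies $p_{\boldsymbol{Y}\mid\boldsymbol{C}}\left( \boldsymbol{y} \mid \boldsymbol{c} \right) \propto \exp\left( -\lVert \boldsymbol{y} - \left( 1 - 2\boldsymbol{c} \right) \rVert^2 / 2\sigma^2 \right)$, so \ac{ML} decoding reduces to a nearest-codeword search. A brute-force NumPy sketch, feasible only for very small codes:

import numpy as np
from itertools import product

def ml_decode(y, G):
    # Enumerate all 2^k codewords; maximizing p(y | c) is equivalent to
    # minimizing the distance between y and the BPSK symbol 1 - 2c.
    k = G.shape[0]
    best_c, best_dist = None, np.inf
    for u in product([0, 1], repeat=k):
        c = (np.array(u) @ G) % 2
        dist = np.sum((y - (1 - 2 * c)) ** 2)
        if dist < best_dist:
            best_c, best_dist = c, dist
    return best_c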
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -472,8 +484,8 @@ and minimizing $g$ using the proximal operator
\cite[Sec. 4.2]{proximal_algorithms}:%
%
\begin{align*}
-\boldsymbol{x} \leftarrow \boldsymbol{x} - \lambda \nabla f\left( \boldsymbol{x} \right) \\
-\boldsymbol{x} \leftarrow \textbf{prox}_{\lambda g} \left( \boldsymbol{x} \right)
+\boldsymbol{x} &\leftarrow \boldsymbol{x} - \lambda \nabla f\left( \boldsymbol{x} \right) \\
+\boldsymbol{x} &\leftarrow \textbf{prox}_{\lambda g} \left( \boldsymbol{x} \right)
,\end{align*}
%
Since $g$ is minimized with the proximal operator and is thus not required
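A generic Python sketch of this alternating iteration; the quadratic $f$ and box-indicator $g$ below are toy choices, not the decoder's actual objective:

import numpy as np

def proximal_gradient(grad_f, prox_g, x0, lam=0.1, iters=200):
    x = x0
    for _ in range(iters):
        x = x - lam * grad_f(x)  # gradient step on the smooth part f
        x = prox_g(x, lam)       # proximal step on the non-smooth part g
    return x

# Toy example: f(x) = 0.5 * ||x - a||^2, g the indicator of [0, 1]^n,
# whose proximal operator is element-wise clipping.
a = np.array([1.8, -0.4, 0.6])
x = proximal_gradient(lambda x: x - a,
                      lambda x, lam: np.clip(x, 0.0, 1.0),
                      x0=np.zeros(3))
print(x)  # approximately [1. 0. 0.6]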