Added tilde to x; P -> p; Minor wording changes

Andreas Tsouchlos 2023-03-11 15:23:10 +01:00
parent eceda0b50f
commit 6513fd2297
2 changed files with 54 additions and 48 deletions


@@ -27,7 +27,7 @@ the \ac{ML} decoding problem:%
%
\begin{align*}
\hat{\boldsymbol{c}}_{\text{\ac{MAP}}} &= \argmax_{\boldsymbol{c} \in \mathcal{C}}
p_{\boldsymbol{C} \mid \boldsymbol{Y}} \left(\boldsymbol{c} \mid \boldsymbol{y}
\right)\\
\hat{\boldsymbol{c}}_{\text{\ac{ML}}} &= \argmax_{\boldsymbol{c} \in \mathcal{C}}
f_{\boldsymbol{Y} \mid \boldsymbol{C}} \left( \boldsymbol{y} \mid \boldsymbol{c}
@@ -182,7 +182,7 @@ They begin by looking at the \ac{ML} decoding problem%
making the \ac{ML} and \ac{MAP} decoding problems equivalent.}%
%
\begin{align}
\hat{\boldsymbol{c}}_{\text{\ac{ML}}} = \argmax_{\boldsymbol{c} \in \mathcal{C}}
f_{\boldsymbol{Y} \mid \boldsymbol{C}}
\left( \boldsymbol{y} \mid \boldsymbol{c} \right)%
\label{eq:lp:ml}
@@ -192,7 +192,7 @@ Assuming a memoryless channel, equation (\ref{eq:lp:ml}) can be rewritten in terms
of the \acp{LLR} $\gamma_i$ \cite[Sec. 2.5]{feldman_thesis}:%
%
\begin{align*}
\hat{\boldsymbol{c}}_{\text{\ac{ML}}} = \argmin_{\boldsymbol{c}\in\mathcal{C}}
\sum_{i=1}^{n} \gamma_i c_i,%
\hspace{5mm} \gamma_i = \ln\left(
\frac{f_{Y_i | C_i} \left( y_i \mid C_i = 0 \right) }
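
The hunk ends inside the LLR definition, but the resulting decoding rule is easy to exercise numerically. A minimal sketch (editorial, not part of the commit; it assumes BPSK with $0 \mapsto +1$, $1 \mapsto -1$ over AWGN, for which $\gamma_i = 2y_i/\sigma^2$; all function and variable names are illustrative):

```python
import numpy as np

# Editorial sketch: brute-force ML decoding via LLRs.
# Assumes BPSK (0 -> +1, 1 -> -1) over AWGN with noise variance sigma2,
# for which gamma_i = ln f(y_i|0)/f(y_i|1) = 2*y_i/sigma2.
def llrs_awgn(y, sigma2):
    return 2.0 * y / sigma2

def ml_decode(y, codebook, sigma2):
    gamma = llrs_awgn(y, sigma2)
    costs = codebook @ gamma           # sum_i gamma_i * c_i for each codeword
    return codebook[np.argmin(costs)]

# Toy codebook: the (3,1) repetition code {000, 111}.
codebook = np.array([[0, 0, 0], [1, 1, 1]])
y = np.array([0.9, -0.2, 1.1])
print(ml_decode(y, codebook, sigma2=0.5))  # -> [0 0 0]
```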
@@ -706,46 +706,48 @@ In contrast to \ac{LP} decoding, the objective function is based on a
non-convex optimization formulation of the \ac{MAP} decoding problem.
In order to derive the objective function, the authors begin with the
\ac{MAP} decoding rule, expressed as a continuous maximization problem%
\footnote{The }%
:%
%
\begin{align}
\hat{\boldsymbol{x}} = \argmax_{\tilde{\boldsymbol{x}} \in \mathbb{R}^{n}}
f_{\tilde{\boldsymbol{X}} \mid \boldsymbol{Y}}
\left( \tilde{\boldsymbol{x}} \mid \boldsymbol{y} \right)
= \argmax_{\tilde{\boldsymbol{x}} \in \mathbb{R}^{n}}
f_{\boldsymbol{Y} \mid \tilde{\boldsymbol{X}}}
\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
f_{\tilde{\boldsymbol{X}}}\left( \tilde{\boldsymbol{x}} \right)%
\label{eq:prox:vanilla_MAP}
.\end{align}%
%
The likelihood $f_{\boldsymbol{Y} \mid \tilde{\boldsymbol{X}}}
\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right) $ is a known function
determined by the channel model.
The prior \ac{PDF} $f_{\tilde{\boldsymbol{X}}}\left( \tilde{\boldsymbol{x}} \right)$ is also
known, as the equal probability assumption is made on
$\mathcal{C}\left( \boldsymbol{H} \right)$.
However, since the considered domain is continuous,
the prior \ac{PDF} cannot be ignored as a constant during the maximization
as is often done, and has a rather unwieldy representation:%
%
\begin{align}
f_{\tilde{\boldsymbol{X}}}\left( \tilde{\boldsymbol{x}} \right) =
\frac{1}{\left| \mathcal{C} \right| }
\sum_{\boldsymbol{c} \in \mathcal{C} }
\delta\left( \tilde{\boldsymbol{x}} - \left( -1 \right) ^{\boldsymbol{c}}\right)
\label{eq:prox:prior_pdf}
.\end{align}%
%
In order to rewrite the prior \ac{PDF}
$f_{\tilde{\boldsymbol{X}}}\left( \tilde{\boldsymbol{x}} \right)$,
the so-called \textit{code-constraint polynomial} is introduced as:%
%
\begin{align*}
h\left( \tilde{\boldsymbol{x}} \right) =
\underbrace{\sum_{i=1}^{n} \left( \tilde{x}_i^2-1 \right) ^2}_{\text{Bipolar constraint}}
+ \underbrace{\sum_{j=1}^{m} \left[
\left( \prod_{i\in N \left( j \right) } \tilde{x}_i \right)
-1 \right] ^2}_{\text{Parity constraint}}%
.\end{align*}%
%
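
A minimal numeric sketch of the code-constraint polynomial (editorial, not part of the commit; `h`, `H`, and the toy matrix are illustrative names): $h$ vanishes exactly at bipolar images of codewords and is positive everywhere else.

```python
import numpy as np

# Editorial sketch of the code-constraint polynomial.
# H is a binary parity-check matrix; x is a real vector in R^n.
def h(x, H):
    bipolar = np.sum((x**2 - 1.0)**2)        # zero iff every entry is +/-1
    parity = 0.0
    for row in H:                            # one term per check node j
        prod = np.prod(x[row.astype(bool)])  # product over i in N(j)
        parity += (prod - 1.0)**2            # satisfied check <=> product = +1
    return bipolar + parity

H = np.array([[1, 1, 0], [0, 1, 1]])         # toy parity-check matrix
print(h(np.array([1.0, 1.0, 1.0]), H))       # 0.0: bipolar image of a codeword
print(h(np.array([1.0, -1.0, 1.0]), H))      # 8.0: both parity checks violated
```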
@@ -758,8 +760,8 @@ constraints, accommodating the role of the parity-check matrix $\boldsymbol{H}$.
The prior \ac{PDF} is then approximated using the code-constraint polynomial as:%
%
\begin{align}
f_{\tilde{\boldsymbol{X}}}\left( \tilde{\boldsymbol{x}} \right)
\approx \frac{1}{Z}\mathrm{e}^{-\gamma h\left( \tilde{\boldsymbol{x}} \right) }%
\label{eq:prox:prior_pdf_approx}
.\end{align}%
%
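
A quick numeric check of why this approximation tightens (editorial sketch, reusing `h` and `H` from the previous snippet): $h = 0$ at bipolar codewords, so $\mathrm{e}^{-\gamma h}$ stays at 1 there, while every other point decays as $\gamma$ grows.

```python
import numpy as np

# Editorial sketch: e^{-gamma*h} concentrates on {x : h(x) = 0} as gamma grows.
on_cw = np.array([1.0, 1.0, 1.0])    # h = 0 here
off_cw = np.array([0.5, -0.3, 0.9])  # h > 0 here
for gamma in (1.0, 10.0, 100.0):
    print(gamma, np.exp(-gamma * h(on_cw, H)), np.exp(-gamma * h(off_cw, H)))
# the second value decays toward 0 while the first stays at 1
```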
@@ -769,35 +771,36 @@ $\gamma \rightarrow \infty$, the approximation in equation
(\ref{eq:prox:prior_pdf}).
This approximation can then be plugged into equation (\ref{eq:prox:vanilla_MAP})
and the likelihood can be rewritten using the negative log-likelihood
$L \left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right) = -\ln\left(
f_{\boldsymbol{Y} \mid \tilde{\boldsymbol{X}}}\left(
\boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right) \right) $:%
%
\begin{align*}
\hat{\boldsymbol{x}} &= \argmax_{\tilde{\boldsymbol{x}} \in \mathbb{R}^{n}}
\mathrm{e}^{- L\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right) }
\mathrm{e}^{-\gamma h\left( \tilde{\boldsymbol{x}} \right) } \\
&= \argmin_{\tilde{\boldsymbol{x}} \in \mathbb{R}^n} \left(
L\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
+ \gamma h\left( \tilde{\boldsymbol{x}} \right)
\right)%
.\end{align*}%
%
Thus, with proximal decoding, the objective function
$g\left( \tilde{\boldsymbol{x}} \right)$ considered is%
%
\begin{align}
g\left( \tilde{\boldsymbol{x}} \right) = L\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
+ \gamma h\left( \tilde{\boldsymbol{x}} \right)%
\label{eq:prox:objective_function}
\end{align}%
%
and the decoding problem is reformulated as%
%
\begin{align*}
\text{minimize}\hspace{2mm} &L\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
+ \gamma h\left( \tilde{\boldsymbol{x}} \right)\\
\text{subject to}\hspace{2mm} &\tilde{\boldsymbol{x}} \in \mathbb{R}^n
.\end{align*}
%
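
To make the objective concrete, a sketch of $g$ for the AWGN case treated later in the section (editorial; additive constants are dropped, `h` and `H` come from the snippet above, and `g`, `y`, `sigma2` are illustrative names):

```python
import numpy as np

# Editorial sketch of the objective for AWGN (constants dropped):
# g(x) = ||y - x||^2 / (2*sigma2) + gamma * h(x)
def g(x, y, H, gamma, sigma2):
    return np.sum((y - x)**2) / (2.0 * sigma2) + gamma * h(x, H)

y = np.array([0.8, 1.2, -0.1])                       # received values
print(g(np.array([1.0, 1.0, 1.0]), y, H, 0.5, 0.5))  # cost of guessing 000
```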
@@ -825,11 +828,11 @@ $\gamma h\left( \boldsymbol{x} \right) $ has to be computed.
It is then immediately approximated with gradient descent:%
%
\begin{align*}
\text{prox}_{\gamma h} \left( \tilde{\boldsymbol{x}} \right) &\equiv
\argmin_{\boldsymbol{t} \in \mathbb{R}^n}
\left( \gamma h\left( \boldsymbol{t} \right) +
\frac{1}{2} \lVert \boldsymbol{t} - \tilde{\boldsymbol{x}} \rVert^2 \right)\\
&\approx \tilde{\boldsymbol{x}} - \gamma \nabla h \left( \tilde{\boldsymbol{x}} \right),
\hspace{5mm} \gamma > 0, \text{ small}
.\end{align*}%
%
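
A sketch of this gradient-descent surrogate (editorial, not part of the commit; `grad_h` works out $\nabla h$ term by term from the bipolar and parity sums, using leave-one-out products to avoid dividing by $\tilde{x}_i$; all names are illustrative):

```python
import numpy as np

# Editorial sketch: gradient of h and the gradient-descent surrogate
# for prox_{gamma h}.
def grad_h(x, H):
    g = 4.0 * x * (x**2 - 1.0)                  # gradient of the bipolar term
    for row in H:
        idx = np.flatnonzero(row)
        prod = np.prod(x[idx])                  # product over i in N(j)
        for k in idx:
            others = np.prod(x[idx[idx != k]])  # product over N(j) \ {k}
            g[k] += 2.0 * (prod - 1.0) * others # gradient of the parity term
    return g

def prox_step(x, H, gamma):
    # prox_{gamma h}(x) ~ x - gamma * grad_h(x) for small gamma > 0
    return x - gamma * grad_h(x, H)

H = np.array([[1, 1, 0], [0, 1, 1]])
print(prox_step(np.array([0.8, 1.1, 0.9]), H, gamma=0.05))
```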
@@ -862,12 +865,15 @@ according to the decoding performance \cite[Sec. 3.1]{proximal_paper}.
%\todo{$x_k$: $k$ or some other indexing variable?}%
%%
In the case of \ac{AWGN}, the likelihood
$f_{\boldsymbol{Y} \mid \tilde{\boldsymbol{X}}}
\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)$
is%
%
\begin{align*}
f_{\boldsymbol{Y} \mid \tilde{\boldsymbol{X}}}
\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
= \frac{1}{\sqrt{2\pi\sigma^2}}\mathrm{e}^{
-\frac{\lVert \boldsymbol{y}-\tilde{\boldsymbol{x}} \rVert^2 }
{2\sigma^2}}
.\end{align*}
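
For reference, taking the negative logarithm of this likelihood (an editorial bridging step, following the document's definition of $L$) isolates the additive constant that the proportionality argument in the following footnote discards:

\begin{align*}
L\left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
= \frac{\lVert \boldsymbol{y}-\tilde{\boldsymbol{x}} \rVert^2}{2\sigma^2}
+ \frac{1}{2}\ln\left( 2\pi\sigma^2 \right)
.\end{align*}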
@@ -877,9 +883,9 @@ Thus, the gradient of the negative log-likelihood becomes%
it suffices to consider only proportionality instead of equality.}%
%
\begin{align*}
\nabla L \left( \boldsymbol{y} \mid \tilde{\boldsymbol{x}} \right)
&\propto \nabla \lVert \boldsymbol{y} - \tilde{\boldsymbol{x}} \rVert^2\\
&\propto \tilde{\boldsymbol{x}} - \boldsymbol{y}
,\end{align*}%
%
allowing equation (\ref{eq:prox:step_log_likelihood}) to be rewritten as%


@@ -21,7 +21,7 @@ Lastly, the optimization methods utilized are described.
\begin{itemize}
\item General remarks on notation (matrices, \ldots)
\item Probabilistic quantities (random variables, \acp{PDF}, pdfs vs pmfs vs cdfs, \ldots)
\end{itemize}