Update bibliography, phrasing, add outlines for sections

This commit is contained in:
2026-03-27 00:15:39 +01:00
parent 168688e9a0
commit 1aa45bd741
2 changed files with 83 additions and 27 deletions

View File

@@ -1492,3 +1492,40 @@ We study the performance of medium-length quantum LDPC (QLDPC) codes in the depo
keywords = {/unread},
file = {Full Text PDF:/home/andreas/workspace/work/hiwi/Zotero/storage/PRCEXIWQ/Gallager - 1960 - Low density parity check codes.pdf:application/pdf},
}
@inproceedings{hassan_fully_2016,
title = {Fully parallel window decoder architecture for spatially-coupled {LDPC} codes},
issn = {1938-1883},
url = {https://ieeexplore.ieee.org/abstract/document/7511553},
doi = {10.1109/ICC.2016.7511553},
abstract = {Spatially-coupled low-density parity-check (SC-LDPC) codes have been shown to be superior in performance than LDPC block codes. In order to comply with the practical constraints on latency, SC-LDPC codes are decoded using a window decoder that reduces the decoder latency and complexity compared to traditional block-wise decoding. However, so far the literature only considers the structural decoding latency of window decoder, ignoring the processing latency. Note that the processing latency directly impacts the decoder's throughput and is an important parameter in any modern communication system. The throughput of an iterative decoder is directly influenced by the number of iterations and hence in this paper we propose a fully parallel window decoder architecture for SC-LDPC codes where the decoding iterations are performed in parallel. This guarantees a high throughout while fulfilling the low latency requirements. The overall decoding latency (structural and processing latency) of the proposed window decoder architecture is compared with the classical window decoder.},
urldate = {2026-03-26},
booktitle = {2016 {IEEE} {International} {Conference} on {Communications} ({ICC})},
author = {Hassan, Najeeb Ul and Schlüter, Martin and Fettweis, Gerhard P.},
month = may,
year = {2016},
note = {ISSN: 1938-1883},
keywords = {/unread, Block codes, Complexity theory, Decoding, Iterative decoding, Sparse matrices, Throughput},
pages = {1--6},
file = {Full Text PDF:/home/andreas/workspace/work/hiwi/Zotero/storage/TRN7GLTA/Hassan et al. - 2016 - Fully parallel window decoder architecture for spatially-coupled LDPC codes.pdf:application/pdf},
}
@article{costello_spatially_2014,
title = {Spatially coupled sparse codes on graphs: theory and practice},
volume = {52},
issn = {1558-1896},
shorttitle = {Spatially coupled sparse codes on graphs},
url = {https://ieeexplore.ieee.org/document/6852099},
doi = {10.1109/MCOM.2014.6852099},
abstract = {Since the discovery of turbo codes 20 years ago and the subsequent rediscovery of low-density parity check codes a few years later, the field of channel coding has experienced a number of major advances. Until that time, code designers were usually happy with performance that came within a few decibels of the Shannon Limit, primarily due to implementation complexity constraints, whereas the new coding techniques now allow performance within a small fraction of a decibel of capacity with modest encoding and decoding complexity. Due to these significant improvements, coding standards in applications as varied as wireless mobile transmission, satellite TV, and deep space communication are being updated to incorporate the new techniques. In this article, we review a particularly exciting new class of low-density parity check codes called spatially coupled codes, which promise excellent performance over a broad range of channel conditions and decoded error rate requirements.},
number = {7},
urldate = {2026-03-26},
journal = {IEEE Communications Magazine},
	author = {Costello, Daniel J. and Dolecek, Lara and Fuja, Thomas E. and Kliewer, Jorg and Mitchell, David G. M. and Smarandache, Roxana},
month = jul,
year = {2014},
note = {TLDR: This article reviews a particularly exciting new class of low-density parity check codes called spatially coupled codes, which promise excellent performance over a broad range of channel conditions and decoded error rate requirements.},
keywords = {/unread, Block codes, Convolutional codes, Decoding, Iterative decoding, Sparse matrices},
pages = {168--176},
file = {Full Text PDF:/home/andreas/workspace/work/hiwi/Zotero/storage/WH3R5BMN/Costello et al. - 2014 - Spatially coupled sparse codes on graphs theory and practice.pdf:application/pdf},
}

View File

@@ -21,7 +21,8 @@ reduced error rate.
Specifically, Shannon proved in 1948 that for any channel, a block
code can be found that achieves arbitrarily small probability of
error at any communication rate up to the capacity of the channel
when the block length approaches infinity \cite{shannon_mathematical_1948}.
when the block length approaches infinity
\cite[Sec.~13]{shannon_mathematical_1948}.
In this section, we explore the concepts of ``classical'' (as in non-quantum)
error correction that are central to this work.
@@ -52,7 +53,7 @@ $\bm{u} \in \mathbb{F}_2^k$ of length $k \in \mathbb{N}$ (called the
A measure of the amount of introduced redundancy is the \textit{code
rate} $R = k/n$.
We call the set of all codewords $\mathcal{C}$ the \textit{code}
\cite[Sec. 3.1]{ryan_channel_2009}.
\cite[Sec.~3.1.1]{ryan_channel_2009}.
%
% d_min and the [] Notation
@@ -69,14 +70,14 @@ $\bm{x}_2$ can be expressed using the \textit{Hamming distance} $d(\bm{x}_1,
We define the \textit{minimum distance} of a code $\mathcal{C}$ as
%
\begin{align*}
d_\text{min} = \min \left\{ d(\bm{x}_1, \bm{x}_2) : \bm{x}_1,
d_\text{min} := \min \left\{ d(\bm{x}_1, \bm{x}_2) : \bm{x}_1,
\bm{x}_2 \in \mathcal{C}, \bm{x}_1 \neq \bm{x}_2 \right\}
.
\end{align*}
%
We can signify that a binary linear block code has information length
$k$, block length $n$ and minimum distance $d_\text{min}$ using the
notation $[n,k,d_\text{dmin}]$ \cite[Sec. 1.3]{macwilliams_theory_1977}.
notation $[n,k,d_\text{dmin}]$ \cite[Sec.~1.3]{macwilliams_theory_1977}.
%
% Parity checks, H, and the syndrome
@@ -90,16 +91,19 @@ Since $\lvert \mathcal{C} \rvert = 2^k$ and $\lvert \mathbb{F}_2^n
additional degrees of freedom.
These conditions, called parity checks, take the form of equations
over $\mathbb{F}_2^n$, linking the individual positions of each codeword.
We can arrange the coefficients of these equations in the
We can arrange the coefficients of these equations in a
\textit{parity-check matrix} (\acs{pcm}) $\bm{H} \in
\mathbb{F}_2^{(n-k) \times n}$ and equivalently define the code as
\cite[Sec. 3.1]{ryan_channel_2009}
\cite[Sec.~3.1.1]{ryan_channel_2009}
%
\begin{align*}
\mathcal{C} = \left\{ \bm{x} \in \mathbb{F}_2^n :
\bm{H}\bm{x}^\text{T} = \bm{0} \right\}
.%
\end{align*}
Note that in general we may have linearly dependent parity checks,
prompting us to define the \ac{pcm} as $\bm{H} \in
\mathbb{F}_2^{m\times n}$ with $m \ge n-k$ instead.
% TODO: Define m
%
The \textit{syndrome} $\bm{s} = \bm{H} \bm{v}^\text{T}$ describes
@@ -113,15 +117,14 @@ exponentially with $n$, in contrast to keeping track of all codewords directly.
%
Figure \ref{fig:Diagram of a transmission system} visualizes the
entire communication process \cite[Sec. 1.1]{ryan_channel_2009}.
communication process \cite[Sec.~1.1]{ryan_channel_2009}.
An input message $\bm{u}\in \mathbb{F}_2^k$ is mapped onto a codeword $\bm{x}
\in \mathbb{F}_2^n$. This is passed on to a modulator, which
interacts with the physical channel.
A demodulator processes the received message and forwards the result
A demodulator processes the channel output and forwards the result
$\bm{y} \in \mathbb{R}^n$ to a decoder.
Finally, the decoder is responsible for obtaining an estimate
$\hat{\bm{u}} \in \mathbb{F}_2^k$ of the original input message from the
received message.
$\hat{\bm{u}} \in \mathbb{F}_2^k$ of the original input message.
This is done by first finding an estimate $\hat{\bm{x}}$ of the sent
codeword and undoing the encoding.
The decoding problem that we generally attempt to solve thus consists
@@ -133,9 +136,9 @@ One approach is to use the \ac{ml} criterion \cite[Sec.
P(\bm{Y} = \bm{y} \vert \bm{X} = \bm{x})
.
\end{align*}
Finally, we differentiate between \textit{soft decision} decoding, where
$\bm{y} \in \mathbb{R}^n$, and \textit{hard decision} decoding, where
$\bm{y} \in \mathbb{F}_2^n$ \cite[Sec. 1.5.1.3]{ryan_channel_2009}.
Finally, we differentiate between \textit{soft-decision} decoding, where
$\bm{y} \in \mathbb{R}^n$, and \textit{hard-decision} decoding, where
$\bm{y} \in \mathbb{F}_2^n$ \cite[Sec.~1.5.1.3]{ryan_channel_2009}.
%
\begin{figure}[h]
\centering
@@ -199,7 +202,7 @@ This is exactly the motivation behind \ac{ldpc} codes \cite[Ch.
%
\ac{ldpc} codes belong to a class sometimes referred to as ``modern codes''.
These differ from ``classical codes'' in their decoding algorithm:
These differ from ``classical codes'' in their decoding algorithms:
Classical codes are usually decoded using one-step hard-decision decoding,
whereas modern codes are suitable for iterative soft-decision
decoding \cite[Preface]{ryan_channel_2009}. The iterative decoding algorithms
@@ -209,8 +212,8 @@ graph that constitutes an alternative representation of the \ac{pcm}.
We define two types of nodes: \acp{vn}, corresponding to codeword
bits, and \acp{cn}, corresponding to individual parity checks.
We then construct the Tanner graph by connecting each \ac{cn} to
the \acp{vn} that make up the corresponding parity check \cite[Ch.
5]{ryan_channel_2009}.
the \acp{vn} that make up the corresponding parity check
\cite[Sec.~5.1.2]{ryan_channel_2009}.
Figure \ref{PCM and Tanner graph of the Hamming code} shows this
construction for the [7,4,3]-Hamming code.
%
@@ -290,30 +293,46 @@ the neighborhood of a variable node $i$ as
$\mathcal{N}_\text{V} (i) = \left\{ j \in \mathcal{J} : \bm{H}_{j,i}
= 1 \right\}$
and that of a check node $j$ as
$\mathcal{N}_\text{C} = \left\{ j \in \mathcal{J} : \bm{H}_{j,i} = 1 \right\}$.
$\mathcal{N}_\text{C} (j) = \left\{ i \in \mathcal{I} : \bm{H}_{j,i}
= 1 \right\}$.
\red{
\begin{itemize}
\item Cycles (? - Only if needed later)
\item Regular vs irregular (? - only if needed later)
\end{itemize}
}
% TODO: Do we need any of these?
% \red{
% \begin{itemize}
% \item Cycles (? - Only if needed later)
% \item Regular vs irregular (? - only if needed later)
% \end{itemize}
% }
\subsection{Spatially-Coupled LDPC Codes}
A relatively recent development in the world of \ac{ldpc} codes is
that of \ac{sc}-\ac{ldpc} codes.\\
\red{[a bit more history (developed by \ldots, developed from \ldots,
\ldots)]}\\
\red{[core concept]}
\red{
\begin{itemize}
\item Core idea
\item Mathematical description (H)
\item Tanner graph + PCM
\item Key benefits and reasoning behind them
\item Cite \cite{costello_spatially_2014} \cite{hassan_fully_2016}
\end{itemize}
}
\subsection{Belief Propagation}
\red{[short intro]} \\
\red{[key points (sub-optimal but good enough, low complexity, \ldots)]} \\
\red{[top-level overview (iterative algorithm that approximates \ldots)]}
\red{
\begin{itemize}
\item Core idea
\item BP for SC-LDPC codes
\item SPA and NMS algorithms
% TODO: Would it be better to split this into a separate section?
\item Sliding-window decoding of SC-LDPC codes
\item Cite \cite{ryan_channel_2009} \cite{hassan_fully_2016}
\cite{costello_spatially_2014}
\end{itemize}
}