diff --git a/src/thesis/acronyms.tex b/src/thesis/acronyms.tex index 0267980..f3b3ba9 100644 --- a/src/thesis/acronyms.tex +++ b/src/thesis/acronyms.tex @@ -8,6 +8,11 @@ long=belief propagation } +\DeclareAcronym{bpgd}{ + short=BPGD, + long=belief propagation with guided decimation +} + \DeclareAcronym{nms}{ short=NMS, long=normalized min-sum diff --git a/src/thesis/chapters/2_fundamentals.tex b/src/thesis/chapters/2_fundamentals.tex index 397d719..f60f9ae 100644 --- a/src/thesis/chapters/2_fundamentals.tex +++ b/src/thesis/chapters/2_fundamentals.tex @@ -517,6 +517,7 @@ This is precisely the effect that leads to the good performance of \ac{sc}-\ac{ldpc} codes in the waterfall region \cite{costello_spatially_2014}. \subsection{Iterative Decoding} +\label{subsec:Iterative Decoding} % Introduction @@ -1373,12 +1374,10 @@ We can describe it using the check matrix \right] .% \end{align} -We can understand each row as defining a stabilizer. +% TODO: Check X vs. Z The first $n$ columns correspond to $X$ operators acting on the corresponding physical qubit, the rest to the $Z$ operators. -% TODO: Write - \begin{figure}[t] \centering @@ -1400,26 +1399,6 @@ corresponding physical qubit, the rest to the $Z$ operators. 
\label{fig:sec} \end{figure} -% %%%%%%%%%%%%%%%% -% \subsection{Decoding Stabilizer Codes} -% \label{subsec:Decoding Stabilizer Codes} -% -% -% -% \noindent\indent\red{[The QEC decoding problem -% \cite[Sec.~2.3]{yao_belief_2024}]} \\ -% \indent\red{[+ Degeneracy]} \\ -% \indent\red{[``The task of decoding is therefore to infer, from a -% measured syndrome, the most likely error coset rather than the exact -% physical error.'' -% % tex-fmt: off -% \cite[Sec.~II~B)]{koutsioumpas_colour_2025}% -% % tex-fmt: on -% ]} \\ -% \indent\red{[Fixing the error after finding it -% \cite[Sec.~10.5.5]{nielsen_quantum_2010} -> This -% may require introducing the gates as unitary]} - %%%%%%%%%%%%%%%% \subsection{Calderbank-Shor-Steane Codes} \label{subsec:Calderbank-Shor-Steane Codes} @@ -1477,14 +1456,17 @@ $\mathcal{C}_2 \subset \mathcal{C}_1$. Various methods of constructing \ac{qec} codes exist \cite{swierkowska_eccentric_2025}. -\red{[topological codes]} -\red{[(?) Mention color and surface codes]}. -A more recent development is that of quantum \ac{ldpc} (\acs{qldpc}) codes. - -% Why QLDPC codes are interesting - -\indent\red{[Constant overhead scaling]} \\ -\indent\red{[Scaling of minimum distance with code length]} \\ +Topological codes, for example, encode information in the features of +a lattice and are intrinsically robust against local errors. +Among these, the \emph{surface code} is the most widely studied. +Another example is that of concatenated codes, which nest one code within +another, allowing for especially simple and flexible constructions +\cite[Sec.~3.2]{swierkowska_eccentric_2025}. +An area of research that has recently seen more attention is that of +quantum \ac{ldpc} (\acs{qldpc}) codes. +They have much better encoding efficiency than, e.g., the surface +code, the scaling up of which would be prohibitively expensive +\cite[Sec.~I]{bravyi_high-threshold_2024}. 
% Bivariate Bicycle codes @@ -1527,9 +1509,168 @@ Additionally, they posess short-depth syndrome measurement circuits, leading to lower time requirements for the syndrome extraction and thus lower error rates \cite[Sec.~1]{bravyi_high-threshold_2024}. -% Decoding QLDPC codes +% Syndrome-based BP -\indent\red{[Decoding QLDPC codes (syndrome-based BP)]} \\ -\indent\red{[Short cycles]} \\ -\indent\red{[Degeneracy + short cycles -> BP+OSD, BPGD]} \\ +As we saw in \autoref{subsec:Stabilizer Measurements}, we work only +with the parity information contained in the syndrome, to avoid +disturbing the quantum states of individual qubits. +This necessitates a modification of the standard \ac{bp} algorithm +introduced in \autoref{subsec:Iterative Decoding} +\cite[Sec.~3.1]{yao_belief_2024}. +Instead of attempting to find the most likely codeword directly, the +algorithm will now try to find an error pattern $\hat{\bm{e}} \in +\mathbb{F}_2^n$ that satisfies +\begin{align*} + \bm{H} \hat{\bm{e}}^\text{T} = \bm{s} + .% +\end{align*} +To this end, we initialize the channel \acp{llr} as +\begin{align*} + \tilde{L}_i = \log{\frac{P(X_i = 0)}{P(X_i = 1)}} = \log \frac{1 - p_i}{p_i} + ,% +\end{align*} +where $p_i$ is the prior probability of error of \ac{vn} $i$. +Additionally, we amend the \ac{cn} update to consider the parity +indicated by the syndrome, calculating +\begin{align*} + L_{i\leftarrow j} = 2\cdot (-1)^{s_j} \cdot \tanh^{-1} \left( \prod_{i'\in + \mathcal{N}(j)\setminus \{i\}} \tanh \frac{L_{i'\rightarrow j}}{2} \right) + . +\end{align*} +The resulting syndrome-based \ac{bp} algorithm is shown in +algorithm \ref{alg:syndome_bp}. 
+ +% tex-fmt: off +\tikzexternaldisable +\begin{algorithm}[t] + \caption{Binary syndrome-based belief propagation (BP) algorithm.} + \label{alg:syndome_bp} + \begin{algorithmic}[1] + \State \textbf{Initialize:} $\tilde{L}_i \leftarrow + \log \frac{1-p_i}{p_i}$ for all $i \in \mathcal{I}$ + \State \textbf{Initialize:} $L_{i \rightarrow j} \leftarrow + \tilde{L}_i$ for all $i \in \mathcal{I},\, j \in \mathcal{N}_\text{V}(i)$ + \State \textbf{Initialize:} $\hat{e} \leftarrow \bm{0}$ + + \For{$\ell = 1, \ldots, n_\text{iter}$} + + \For{$j \in \mathcal{J}$} + \For{$i \in \mathcal{N}_\text{C}(j)$} + \State $\displaystyle L_{i \leftarrow j} \leftarrow + 2\cdot(-1)^{s_j}\cdot\tanh^{-1} + \!\left( + \prod_{i' \in \mathcal{N}_\text{C}(j)\setminus\{i\}} + \tanh\frac{L_{i'\rightarrow j}}{2} + \right)$ + \EndFor + \EndFor + + \For{$i \in \mathcal{I}$} + \For{$j \in \mathcal{N}_\text{V}(i)$} + \State $\displaystyle L_{i \rightarrow j} \leftarrow + \tilde{L}_i + + \sum_{j' \in \mathcal{N}_\text{V}(i)\setminus\{j\}} + L_{i \leftarrow j'}$ + \EndFor + \EndFor + + \For{$i \in \mathcal{I}$} + \State $\displaystyle \hat{e}_i \leftarrow + \mathbbm{1}\left\{ + \tilde{L}_i + + \sum_{j \in \mathcal{N}_\text{V}(i)} L_{i \leftarrow j} < 0 + \right\}$ + \EndFor + + \If{$\bm{H}\hat{\bm{e}}^\text{T} = \bm{s}$} + \State \textbf{break} + \EndIf + + \EndFor + \State \textbf{return} $\hat{\bm{e}}$ + \end{algorithmic} +\end{algorithm} +\tikzexternalenable +% tex-fmt: on + +% Degeneracy and short cycles + +Decoding \ac{qldpc} codes brings with it some unique challenges. +One issue is that of \emph{quantum degeneracy}. +Because errors that differ by a stabilizer have the same impact on +all codewords, there can be multiple minimum-weight solutions to the +quantum decoding problem \cite[Sec.~II.C.]{babar_fifteen_2015} +\cite[Sec.~V]{roffe_decoding_2020}. +This leads to the decoding algorithm getting confused about the +direction to proceed in \cite[Sec.~5]{yao_belief_2024}. 
+Another problem is that due to the commutativity property of the stabilizers, +quantum codes inherently contain short cycles +\cite[Sec.~IV.C]{babar_fifteen_2015}. +As discussed in \autoref{subsec:Iterative Decoding}, these lead to +the violation of the independence assumption of the messages passed +during decoding, impeding performance. + +% BPGD + +The aforementioned issues both manifest themselves as convergence problems +of the \ac{bp} algorithm, and different ways of modifying the algorithm +to aid convergence exist. +One approach is to use \ac{bp} with guided decimation (\acs{bpgd}) +\cite[Alg.~1]{yao_belief_2024}. +Here, a number $T\in \mathbb{N}$ of \ac{bp} iterations are performed, +before \emph{decimating} the most reliable \ac{vn}, i.e., performing +a hard decision and excluding it from further decoding. +This constrains the solution space more and more as the decoding +progresses, encouraging the algorithm to converge to one of the +solutions \cite[Sec.~5]{yao_belief_2024}. +Algorithm \ref{alg:bpgd} shows this process. +Note that as the Tanner graph only has $n$ \acp{vn}, this is a +natural constraint on the maximum number of iterations. 
+ +% TODO: Explain that setting the channel LLR to infinity is the same +% as a hard decision and ignoring the VN in the further decoding +% tex-fmt: off +\tikzexternaldisable +\begin{algorithm}[t] + \caption{Belief propagation with guided decimation (BPGD) algorithm.} + \label{alg:bpgd} + \begin{algorithmic}[1] + \State \textbf{Initialize:} $\tilde{L}_i \leftarrow + \log \frac{1-p_i}{p_i}$ for all $i \in \mathcal{I}$ + \State \textbf{Initialize:} $L_{i \rightarrow j} \leftarrow + \tilde{L}_i$ for all $i \in \mathcal{I},\, j \in \mathcal{N}_\text{V}(i)$ + \State \textbf{Initialize:} $\hat{e} \leftarrow \bm{0}$ + \State \textbf{Initialize:} $\mathcal{I}' \leftarrow \mathcal{I}$ + + \For{$r = 1, \ldots, n$} + + \For{$\ell = 1, \ldots, T$} + \State Perform \ac{cn} update + \State Perform \ac{vn} update + \State $L^\text{total}_i \leftarrow \tilde{L}_i + \sum_{j \in \mathcal{N}_\text{V}(i)} L_{i \leftarrow j}$ + \EndFor + + \For{$i \in \mathcal{I}$} + \State $\displaystyle \hat{e}_i \leftarrow + \mathbbm{1}\left\{ L^\text{total}_i < 0 \right\}$ + \EndFor + + \If{$\bm{H}\hat{\bm{e}}^\text{T} = \bm{s}$} + \State \textbf{break} + \Else + \State $i_\text{max} \leftarrow \argmax_{i \in \mathcal{I}'} \lvert L^\text{total}_i \rvert $ + \If{$L^\text{total}_{i_\text{max}} < 0$} + \State $\tilde{L}_{i_\text{max}} \leftarrow -\infty$ + \Else + \State $\tilde{L}_{i_\text{max}} \leftarrow +\infty$ + \EndIf + \State $\mathcal{I}' \leftarrow \mathcal{I}'\setminus\{i_\text{max}\}$ + \EndIf + + \EndFor + \State \textbf{return} $\hat{\bm{e}}$ + \end{algorithmic} +\end{algorithm} +\tikzexternalenable +% tex-fmt: on diff --git a/src/thesis/main.tex b/src/thesis/main.tex index 95c77ca..032ade0 100644 --- a/src/thesis/main.tex +++ b/src/thesis/main.tex @@ -6,12 +6,15 @@ \usepackage{amsfonts} \usepackage{mleftright} \usepackage{bm} +\usepackage{bbm} \usepackage{tikz} \usepackage{xcolor} \usepackage{pgfplots} \pgfplotsset{compat=newest} \usepackage{acro} \usepackage{braket} +\usepackage{listings} 
+\usepackage{caption} % \usepackage[ % backend=biber, % style=ieee, @@ -20,9 +23,10 @@ \usepackage{todonotes} \usepackage{quantikz} \usepackage{stmaryrd} +\usepackage{algorithm} +\usepackage[noEnd=false]{algpseudocodex} \usetikzlibrary{calc, positioning, arrows, fit} - \usetikzlibrary{external} \tikzexternalize