12 Commits

8 changed files with 891 additions and 272 deletions

View File

@@ -3,6 +3,16 @@
long=quantum error correction
}
\DeclareAcronym{dem}{
short=DEM,
long=detector error model
}
\DeclareAcronym{ler}{
short=LER,
long=logical error rate
}
\DeclareAcronym{bp}{
short=BP,
long=belief propagation
@@ -18,6 +28,16 @@
long=normalized min-sum
}
\DeclareAcronym{osd}{
short=OSD,
long=ordered statistics decoding
}
\DeclareAcronym{aed}{
short=AED,
long=automorphism ensemble decoding
}
\DeclareAcronym{bsc}{
short=BSC,
long=binary symmetric channel

View File

@@ -1070,3 +1070,63 @@
year = {2021},
pages = {605},
}
@article{chamberland_flag_2018,
title = {Flag fault-tolerant error correction with arbitrary distance codes},
volume = {2},
issn = {2521-327X},
doi = {10.22331/q-2018-02-08-53},
journal = {Quantum},
author = {Chamberland, Christopher and Beverland, Michael E.},
month = feb,
year = {2018},
pages = {53},
}
@article{chen_exponential_2021,
title = {Exponential suppression of bit or phase errors with cyclic error correction},
volume = {595},
copyright = {2021 The Author(s)},
issn = {1476-4687},
doi = {10.1038/s41586-021-03588-y},
language = {en},
number = {7867},
journal = {Nature},
publisher = {Nature Publishing Group},
author = {Chen, Zijun and Satzinger, Kevin J. and Atalaya, Juan and Korotkov, Alexander N. and Dunsworth, Andrew and Sank, Daniel and Quintana, Chris and McEwen, Matt and Barends, Rami and Klimov, Paul V. and Hong, Sabrina and Jones, Cody and Petukhov, Andre and Kafri, Dvir and Demura, Sean and Burkett, Brian and Gidney, Craig and Fowler, Austin G. and Paler, Alexandru and Putterman, Harald and Aleiner, Igor and Arute, Frank and Arya, Kunal and Babbush, Ryan and Bardin, Joseph C. and Bengtsson, Andreas and Bourassa, Alexandre and Broughton, Michael and Buckley, Bob B. and Buell, David A. and Bushnell, Nicholas and Chiaro, Benjamin and Collins, Roberto and Courtney, William and Derk, Alan R. and Eppens, Daniel and Erickson, Catherine and Farhi, Edward and Foxen, Brooks and Giustina, Marissa and Greene, Ami and Gross, Jonathan A. and Harrigan, Matthew P. and Harrington, Sean D. and Hilton, Jeremy and Ho, Alan and Huang, Trent and Huggins, William J. and Ioffe, L. B. and Isakov, Sergei V. and Jeffrey, Evan and Jiang, Zhang and Kechedzhi, Kostyantyn and Kim, Seon and Kitaev, Alexei and Kostritsa, Fedor and Landhuis, David and Laptev, Pavel and Lucero, Erik and Martin, Orion and McClean, Jarrod R. and McCourt, Trevor and Mi, Xiao and Miao, Kevin C. and Mohseni, Masoud and Montazeri, Shirin and Mruczkiewicz, Wojciech and Mutus, Josh and Naaman, Ofer and Neeley, Matthew and Neill, Charles and Newman, Michael and Niu, Murphy Yuezhen and O'Brien, Thomas E. and Opremcak, Alex and Ostby, Eric and Pató, Bálint and Redd, Nicholas and Roushan, Pedram and Rubin, Nicholas C. and Shvarts, Vladimir and Strain, Doug and Szalay, Marco and Trevithick, Matthew D. and Villalonga, Benjamin and White, Theodore and Yao, Z. Jamie and Yeh, Ping and Yoo, Juhwan and Zalcman, Adam and Neven, Hartmut and Boixo, Sergio and Smelyanskiy, Vadim and Chen, Yu and Megrant, Anthony and Kelly, Julian and {Google Quantum AI}},
month = jul,
year = {2021},
pages = {383--387},
}
@article{kelly_state_2015,
title = {State preservation by repetitive error detection in a superconducting quantum circuit},
volume = {519},
issn = {0028-0836, 1476-4687},
doi = {10.1038/nature14270},
number = {7541},
journal = {Nature},
author = {Kelly, J. and Barends, R. and Fowler, A. G. and Megrant, A. and Jeffrey, E. and White, T. C. and Sank, D. and Mutus, J. Y. and Campbell, B. and Chen, Yu and Chen, Z. and Chiaro, B. and Dunsworth, A. and Hoi, I.-C. and Neill, C. and O'Malley, P. J. J. and Quintana, C. and Roushan, P. and Vainsencher, A. and Wenner, J. and Cleland, A. N. and Martinis, John M.},
month = mar,
year = {2015},
pages = {66--69},
}
@misc{bombin_modular_2023,
title = {Modular decoding: parallelizable real-time decoding for quantum computers},
shorttitle = {Modular decoding},
doi = {10.48550/arXiv.2303.04846},
publisher = {arXiv},
author = {Bomb{\'i}n, H{\'e}ctor and Dawson, Chris and Liu, Ye-Hua and Nickerson, Naomi and Pastawski, Fernando and Roberts, Sam},
month = mar,
year = {2023},
}
@misc{leverrier_decoding_2022,
title = {Decoding quantum {Tanner} codes},
doi = {10.48550/arXiv.2208.05537},
publisher = {arXiv},
author = {Leverrier, Anthony and Z{\'e}mor, Gilles},
month = dec,
year = {2022},
}

View File

@@ -99,14 +99,14 @@ prompting us to define the \ac{pcm} as $\bm{H} \in
The \textit{syndrome} $\bm{s} = \bm{H} \bm{v}^\text{T}$ describes
which parity checks a candidate codeword $\bm{v} \in \mathbb{F}_2^n$ violates.
The representation using the \ac{pcm} has the benefit of providing a
description of the code, the memory complexity of which doesn't grow
description of the code, the memory complexity of which does not grow
exponentially with $n$, in contrast to keeping track of all codewords directly.
%
% The decoding problem
%
Figure \ref{fig:Diagram of a transmission system} visualizes the
\Cref{fig:Diagram of a transmission system} visualizes the
communication process \cite[Sec.~1.1]{ryan_channel_2009}.
An input message $\bm{u}\in \mathbb{F}_2^k$ is mapped onto a codeword $\bm{x}
\in \mathbb{F}_2^n$. This is passed on to a modulator, which
@@ -197,7 +197,7 @@ bits, and \acp{cn}, corresponding to individual parity checks.
We then construct the Tanner graph by connecting each \ac{cn} to
the \acp{vn} that make up the corresponding parity check
\cite[Sec.~5.1.2]{ryan_channel_2009}.
Figure \ref{PCM and Tanner graph of the Hamming code} shows this
\Cref{PCM and Tanner graph of the Hamming code} shows this
construction for the [7,4,3]-Hamming code.
%
\begin{figure}[t]
@@ -286,7 +286,7 @@ $\mathcal{N}_\text{C} (j) = \left\{ i \in \mathcal{I} : \bm{H}_{j,i}
We typically evaluate the performance of LDPC codes using the
\ac{ber} or the \ac{fer} (a \textit{frame} refers to one whole
transmitted block in this context).
Considering an \ac{awgn} channel, \autoref{fig:ldpc-perf} shows a
Considering an \ac{awgn} channel, \Cref{fig:ldpc-perf} shows a
qualitative performance characteristic of an \ac{ldpc} code
\cite[Fig.~1]{costello_spatially_2014}. We talk of the
\textit{waterfall} and the \textit{error floor} regions.
@@ -415,7 +415,7 @@ This is achieved by connecting some \acp{vn} of one spatial position to
where $K \in \mathbb{N}$ is the \textit{coupling width} and $L \in
\mathbb{N}$ is the number of spatial positions.
This construction results in a Tanner graph as depicted in
\autoref{fig:sc-ldpc-tanner}.
\Cref{fig:sc-ldpc-tanner}.
\begin{figure}[t]
\centering
@@ -701,14 +701,14 @@ formula simplifies to the direct calculation of the expected value.
Let us now examine how the observable operator $\hat{Q}$ relates to
the determinate states of the observable quantity.
We begin by translating \autoref{eq:gen_expr_Q_exp} into linear algebra as
We begin by translating \Cref{eq:gen_expr_Q_exp} into linear algebra as
\cite[Eq.~3.114]{griffiths_introduction_1995}
\begin{align}
\label{eq:gen_expr_Q_exp_lin}
\braket{Q} = \braket{\psi \vert \hat{Q}\psi}
.%
\end{align}
\autoref{eq:gen_expr_Q_exp_lin} expresses an inherently probabilistic
\Cref{eq:gen_expr_Q_exp_lin} expresses an inherently probabilistic
relationship.
The determinate states are inherently deterministic.
To relate the two, we note that since determinate states should
@@ -757,8 +757,8 @@ We can use the determinate states for this purpose, expressing the state as%
Because of the normalization of the wave function such that
$\int_{-\infty}^{\infty} \lvert \psi(x,t) \rvert^2 dx = 1$, we have
$\sum_{n=1}^{\infty} \lvert c_n \rvert ^2 = 1$.
Inserting \autoref{eq:determinate_basis} into
\autoref{eq:gen_expr_Q_exp_lin} we obtain
Inserting \Cref{eq:determinate_basis} into
\Cref{eq:gen_expr_Q_exp_lin} we obtain
% tex-fmt: off
\cite[Prob.~3.35c)]{griffiths_introduction_1995}
% tex-fmt: on
@@ -795,7 +795,7 @@ referring to the operator $\hat{Q}$.
% Projective measurements
The measurements we considered in the previous section, for which
\autoref{eq:gen_expr_Q_exp_lin} holds, belong to the category of
\Cref{eq:gen_expr_Q_exp_lin} holds, belong to the category of
\emph{projective measurements}.
For these, certain restrictions such as repeatability apply: the act
of measuring a quantum state should \emph{collapse} it onto one of
@@ -809,8 +809,8 @@ they are not relevant to this work.
We can model the collapse of the original state onto one of the
superimposed basis states as a \emph{projection}.
To see this, we use Equations \ref{eq:determinate_basis} and
\ref{eq:observable_eigenrelation} to compute
To see this, we use
\Cref{eq:determinate_basis,eq:observable_eigenrelation} to compute
\begin{align*}
\hat{Q}\ket{\psi} = \sum_{n=1}^{\infty} c_n \hat{Q} \ket{e_n}
= \sum_{n=1}^{\infty} \lambda_n c_n \ket{e_n}
@@ -881,7 +881,8 @@ We fix an orthonormal basis of $\mathbb{C}^2$ to be
.%
\end{align*}
A qubit is defined to be a system with quantum state
\begin{align*}
\begin{align}
\label{eq:gen_qubit_state}
\ket{\psi} =
\begin{pmatrix}
\alpha \\
@@ -889,7 +890,7 @@ A qubit is defined to be a system with quantum state
\end{pmatrix}
= \alpha \ket{0} + \beta \ket{1}
.%
\end{align*}
\end{align}
The overall state of a composite quantum system is described using
the \emph{tensor product}, denoted as $\otimes$
\cite[Sec.~2.2.8]{nielsen_quantum_2010}.
@@ -950,7 +951,7 @@ information is stored in the correlations between the qubits
% The size of the vector space
As we can see in \autoref{eq:product_state}, the number of
As we can see in \Cref{eq:product_state}, the number of
computational basis states needed to express the full composite state
is $2^n$.
This is in contrast to classical systems, where the dimensionality of
@@ -968,7 +969,7 @@ we now shift our focus to describing the evolution of their states.
We model state changes as operators.
Unlike classical systems, where there are only two possible states and
thus the only possible state change is a bit-flip, a general qubit
state as shown in \autoref{eq:gen_qubit_state} lives on a continuum of values.
state as shown in \Cref{eq:gen_qubit_state} lives on a continuum of values.
We thus technically also have an infinite number of possible state changes.
Fortunately, we can express any operator as a linear combination of the
\emph{Pauli operators} \cite[Sec.~2.2]{gottesman_stabilizer_1997}
@@ -1083,8 +1084,8 @@ the gate to the corresponding qubit, where a filled dot is placed.
A controlled gate applies the respective operation only if the
control qubit is in state $\ket{1}$.
An example of this is the CNOT gate introduced in
\autoref{subsec:Qubits and Multi-Qubit States}, which is depicted in
\autoref{fig:cnot_circuit}.
\Cref{subsec:Qubits and Multi-Qubit States}, which is depicted in
\Cref{fig:cnot_circuit}.
\begin{figure}[t]
\centering
@@ -1127,7 +1128,7 @@ Three main restrictions apply \cite[Sec.~2.4]{roffe_quantum_2019}:
impossible to exactly copy the state of one qubit into another.
\item Qubits are susceptible to more types of errors than
just bit-flips, as we saw in
\autoref{subsec:Qubits and Multi-Qubit States}.
\Cref{subsec:Qubits and Multi-Qubit States}.
\item Directly measuring the state of a qubit collapses it onto
one of the determinate states, thereby potentially destroying
information.
@@ -1198,7 +1199,7 @@ whether a state belongs
% $\mathcal{C}$ or $\mathcal{F}$ with a certain probability.
% }
to $\mathcal{C}$ or $\mathcal{F}$.
As explained in \autoref{subsec:Observables}, physical measurements
As explained in \Cref{subsec:Observables}, physical measurements
can be mathematically described using operators whose eigenvalues
are the possible measurement results.
Here, we need an operator with two eigenvalues and the corresponding
@@ -1225,7 +1226,7 @@ ancilla qubit with state $\ket{0}_\text{A}$ and entangle it with
$\ket{\psi}_\text{L}$ in such a way that the eigenvalue is indicated
by measuring the ancilla qubit instead.
More specifically, using a stabilizer measurement circuit as shown in
\autoref{fig:stabilizer_measurement}, we transform the state of the
\Cref{fig:stabilizer_measurement}, we transform the state of the
three-qubit system as
\begin{align}
\label{eq:error_projection}
@@ -1270,7 +1271,7 @@ lies either in one or the other.
This is because the act of measuring the error partly collapses the
state, eliminating the uncertainty about the type of the error
\cite[Sec.~10.2]{nielsen_quantum_2010}.
This can be seen in \autoref{eq:error_projection}, as the expressions
This can be seen in \Cref{eq:error_projection}, as the expressions
$P_\mathcal{C}$ and $P_\mathcal{F}$ constitute projection operators onto
$\mathcal{C}$ and $\mathcal{F}$.
E.g., $P_\mathcal{C}$ will eliminate all components of $E
@@ -1348,7 +1349,7 @@ Similar to the classical case, we can use a syndrome vector to
describe which local codes are violated.
To obtain the syndrome, we simply measure the corresponding
operators $P_i$, each using a circuit as explained in
\autoref{subsec:Stabilizer Measurements}.
\Cref{subsec:Stabilizer Measurements}.
Note that this is an abstract representation of the syndrome extraction.
For the actual implementation in hardware, we can transform this into
a circuit that requires only CNOT and H-gates
@@ -1444,7 +1445,7 @@ vice versa, this property translates into being able to split the
stabilizers into a subset being made up of only $X$
operators and the rest only of $Z$ operators.
We call such codes \ac{css} codes.
We can see this property in \autoref{eq:steane} in the check matrix
We can see this property in \Cref{eq:steane} in the check matrix
of the Steane code.
% Construction
@@ -1514,7 +1515,7 @@ $\bm{H}_Z$ are constructed from two matrices $\bm{A}$ and $\bm{B}$ as
.%
\end{align*}
This way, we can guarantee the satisfaction of the commutativity
condition (\autoref{eq:css_condition}).
condition (\Cref{eq:css_condition}).
To define $\bm{A}$ and $\bm{B}$ we first introduce some additional notation.
We denote the identity matrix as $\bm{I_l} \in \mathbb{F}^{l\times l}$ and
the \emph{cyclic shift matrix} as $\bm{S_l} \in \mathbb{F}^{l\times
@@ -1543,11 +1544,11 @@ and thus lower error rates \cite[Sec.~1]{bravyi_high-threshold_2024}.
% Syndrome-based BP
As we saw in \autoref{subsec:Stabilizer Measurements}, we work only
As we saw in \Cref{subsec:Stabilizer Measurements}, we work only
with the parity information contained in the syndrome, to avoid
disturbing the quantum states of individual qubits.
This necessitates a modification of the standard \ac{bp} algorithm
introduced in \autoref{subsec:Iterative Decoding}
introduced in \Cref{subsec:Iterative Decoding}
\cite[Sec.~3.1]{yao_belief_2024}.
Instead of attempting to find the most likely codeword directly, the
algorithm will now try to find an error pattern $\hat{\bm{e}} \in
@@ -1571,7 +1572,7 @@ indicated by the syndrome, calculating
.
\end{align*}
The resulting syndrome-based \ac{bp} algorithm is shown in
algorithm \ref{alg:syndome_bp}.
\Cref{alg:syndome_bp}.
% tex-fmt: off
\tikzexternaldisable
@@ -1639,7 +1640,7 @@ direction to proceed in \cite[Sec.~5]{yao_belief_2024}.
Another problem is that due to the commutativity property of the stabilizers,
quantum codes inherently contain short cycles
\cite[Sec.~IV.C]{babar_fifteen_2015}.
As discussed in \autoref{subsec:Iterative Decoding}, these lead to
As discussed in \Cref{subsec:Iterative Decoding}, these lead to
the violation of the independence assumption of the messages passed
during decoding, impeding performance.
@@ -1656,10 +1657,16 @@ a hard decision and excluding it from further decoding.
This constrains the solution space more and more as the decoding
progresses, encouraging the algorithm to converge to one of the
solutions \cite[Sec.~5]{yao_belief_2024}.
Algorithm \ref{alg:bpgd} shows this process.
\Cref{alg:bpgd} shows this process.
Note that as the Tanner graph only has $n$ \acp{vn}, this is a
natural constraint on the maximum number of outer iterations of the algorithm.
Quantum degeneracy additionally necessitates some care in the way
error rates are computed in simulations.
We must consider the fact that multiple solutions are valid by
comparing the logical states, computed by measuring the logical operators.
This way, we obtain the \ac{ler}.
% TODO: Explain that setting the channel LLR to infinity is the same
% as a hard decision and ignoring the VN in the further decoding
% tex-fmt: off

View File

@@ -1,5 +1,5 @@
% TODO: Make all [H] -> [t]
\chapter{Fault-Tolerant Quantum Error Correction}
\label{ch:Fault tolerance}
% Intro
@@ -21,7 +21,7 @@ introduces two new challenges \cite[Sec.~4]{gottesman_introduction_2009}:
\item \ac{qec} systems are themselves partially implemented in
quantum hardware. In addition to the errors we have
originally introduced them for, these systems must
be able to acount for the fact they are implemented on noisy
be able to account for the fact they are implemented on noisy
hardware themselves.
\end{itemize}
In the literature, both of these points are viewed under the umbrella
@@ -40,11 +40,10 @@ address both.
% Definition of fault tolerance
% TODO: Different variable name for N?
We model the possible occurrence of errors during any processing
stage as different \emph{error locations} $E_i,~i\in \{1,\ldots,N\}$
in the circuit.
$N \in \mathbb{N}$ is the total number of error locations.
$N \in \mathbb{N}$ is the total number of considered error locations.
The \emph{circuit error vector} $\bm{e} \in \{0,1\}^N$ is a vector
indicating which errors occurred, with
\begin{align*}
@@ -55,7 +54,7 @@ indicating which errors occurred, with
\end{cases}
.%
\end{align*}
\autoref{fig:fault_tolerance_overview} illustrates the flow of errors.
\Cref{fig:fault_tolerance_overview} illustrates the flow of errors.
Specifically for \ac{css} codes, a \ac{qec} procedure is deemed
fault-tolerant, if \cite[Def.~4.2]{derks_designing_2025}
\begin{gather*}
@@ -151,8 +150,7 @@ Typically, the number of syndrome extraction rounds is chosen as $d_\text{min}$.
% Intro
We collect the probabilities of error at each location in the
\emph{noise model}, a vector $\bm{p} \in [0,1]^N$, where $N \in
\mathbb{N}$ is the number of possible error locations.
\emph{noise model}, a vector $\bm{p} \in [0,1]^N$.
There are different types of noise models, each allowing for
different error locations in the circuit.
@@ -161,28 +159,27 @@ different error locations in the circuit.
We will illustrate the most widely used types of error models on the
example of the three-qubit repetition code for $X$ errors.
This is a code with check matrix
\begin{align*}
\bm{H} =
\begin{gather}
\label{eq:rep_code_H}
\bm{H}_Z =
\left[
\begin{array}{ccc|ccc}
0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 1 & 0 \\
0 & 0 & 0 & 0 & 1 & 1
\begin{array}{ccc}
1 & 1 & 0 \\
0 & 1 & 1
\end{array}
\right]
.
\end{align*}
\end{gather}
We can see that it has stabilizers $Z_1Z_2$ and $Z_2Z_3$.
\autoref{fig:pure_syndrome_extraction} shows the corresponding
\Cref{fig:pure_syndrome_extraction} shows the corresponding
syndrome extraction circuit.
We refer to the qubits carrying the logical state
$\ket{\psi}_\text{L}$ as \emph{data qubits}.
Note that this is a concrete implementation using CNOT gates, as
opposed to the system-level view introduced in
\autoref{subsec:Stabilizer Codes}.
\Cref{subsec:Stabilizer Codes}.
We visualize the different types of noise models in
\autoref{fig:noise_model_types}.
\Cref{fig:noise_model_types}.
%%%%%%%%%%%%%%%%
\subsection{Bit-Flip Noise}
@@ -191,10 +188,10 @@ We visualize the different types of noise models in
The simplest type of noise model is \emph{bit-flip} noise.
This corresponds to the classical \ac{bsc}, i.e., only $X$ errors on the
data qubits are possible \cite[Appendix~A]{gidney_new_2023}.
This type of noise model is shown in \autoref{subfig:bit_flip}.
This type of noise model is shown in \Cref{subfig:bit_flip}.
Note that we cannot use bit-flip noise to develop fault-tolerant
systems, as it doesnt't account for errors during the syndrome extraction.
systems, as it does not account for errors during the syndrome extraction.
%%%%%%%%%%%%%%%%
\subsection{Depolarizing Channel}
@@ -203,7 +200,7 @@ systems, as it doesnt't account for errors during the syndrome extraction.
Extending bit-flip noise to consider $X,Z$ or $Y$ instead of just $X$
errors, we obtain the \emph{depolarizing channel}
\cite[Sec.~7.6]{gottesman_stabilizer_1997}, depicted in
\autoref{subfig:depolarizing}.
\Cref{subfig:depolarizing}.
It is well-suited for modeling memory experiments, where data qubits
are stored idly for some period of time and errors accumulate due to
decoherence.
@@ -227,14 +224,14 @@ locations right before each measurement \cite[Appendix~A]{gidney_new_2023}.
Note that it is enough to only consider $X$ errors at these points,
since that is the only type of error directly affecting the
measurement outcomes.
This model is depicted in \autoref{subfig:phenomenological}.
This model is depicted in \Cref{subfig:phenomenological}.
While not fully capturing all possible error mechanisms,
phenomenological noise is already a significant step beyond the code
capacity noise models.
Additionally, there are applications where the
consideration of phenomenological noise is enough.
It can, for example, be used for guiding the design of fault-tolerant
It can, for example, be used to guide the design of fault-tolerant
circuitry \cite[Sec.~4.2]{derks_designing_2025}.
%%%%%%%%%%%%%%%%
@@ -242,13 +239,13 @@ circuitry [DTTBE25, Sec. 4.2].
\label{subsec:Circuit-Level Noise}
The most general type of noise model is \emph{circuit-level noise}.
Here we not only consider noise inbetween syndrome extraction rounds
Here we not only consider noise between syndrome extraction rounds
and at the measurements, but at each gate.
Specifically, we allow arbitrary $n$-qubit Pauli errors after each
$n$-qubit gate \cite[Def.~2.5]{derks_designing_2025}.
An $n$-qubit Pauli error is simply a series of correlated Pauli
errors on each related individual qubit.
This type of noise model is shown in \autoref{subfig:circuit_level}.
This type of noise model is shown in \Cref{subfig:circuit_level}.
While phenomenological noise is useful for some design aspects of
fault tolerant circuitry, for simulations, circuit-level noise should
@@ -277,7 +274,48 @@ error locations.
\label{fig:pure_syndrome_extraction}
\end{figure}
\begin{figure}[t]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Detector Error Models}
\label{sec:Detector Error Models}
\emph{Detector error models} (\acsp{dem}) constitute a standardized
framework for
passing information about a circuit used for \ac{qec} to a decoder.
They are also useful as a theoretical tool to aid in the design of
fault-tolerant \ac{qec} schemes.
E.g., they can be used to easily determine whether a measurement
schedule is fault-tolerant \cite[Example~12]{derks_designing_2025}.
Other approaches of implementing fault tolerance exist, such as
flag error correction, which uses additional ancilla qubits to detect
potentially damaging high-weight errors \cite[Sec.~1]{chamberland_flag_2018}.
However, \acp{dem} offer some unique advantages
\cite[Sec.~4.2]{derks_designing_2025}:
\begin{itemize}
\item They distinguish between errors based on their effect on
the measurements, not based on their location in the circuit.
This allows for merging equivalent errors, which decreases
decoding complexity.
\item Errors on the data qubits and on the measurements are
treated in a unified manner. This leads to a more powerful
description of the overall circuit.
\end{itemize}
In this work, we only consider the process of decoding under the
\ac{dem} framework.
% Core idea
To achieve fault tolerance, the goal we strive towards is to
consider the internal errors in addition to the input errors during
the decoding process.
The core idea behind detector error models is to do this by defining
a new \emph{circuit code} that describes the circuit.
Each \ac{vn} of this new code corresponds to an error location in the
circuit and each \ac{cn} corresponds to a syndrome measurement.
% This circuit code, combined with the prior probabilities of error
% given by the noise model, incorporates all information necessary for decoding.
\begin{figure}[H]
\centering
\newcommand{\xerr}{\gate[style={fill=KITblue!50}]{\phantom{1}}}
@@ -323,7 +361,7 @@ error locations.
\label{subfig:bit_flip}
\end{minipage}
\vspace*{5mm}
\vspace*{7mm}
\begin{minipage}{\textwidth}
\centering
@@ -341,7 +379,7 @@ error locations.
\label{subfig:depolarizing}
\end{minipage}
\vspace*{5mm}
\vspace*{7mm}
\begin{minipage}{\textwidth}
\centering
@@ -359,7 +397,7 @@ error locations.
\label{subfig:phenomenological}
\end{minipage}
\vspace*{5mm}
\vspace*{7mm}
\begin{minipage}{\textwidth}
\centering
@@ -390,50 +428,12 @@ error locations.
% tex-fmt: on
\end{minipage}
\vspace*{5mm}
\caption{Types of noise models.}
\label{fig:noise_model_types}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Detector Error Models}
\label{sec:Detector Error Models}
\emph{Detector error models} (\acsp{dem}) constitue a standardized framework for
passing information about a circuit used for \ac{qec} to a decoder.
They are also useful as a theoretical tool to aid in the design of
fault-tolerant \ac{qec} schemes.
E.g., they can be used to easily determine whether a measurement
schedule is fault-tolerant \cite[Example~12]{derks_designing_2025}.
Other approaches of implementing fault tolerance exist, such as
flag error correction, which uses additional ancilla qubits to detect
potentially damaging high-weight errors \cite[Sec.~1]{chamberland_flag_2018}.
However, \acp{dem} offer some unique advantages
\cite[Sec.~4.2]{derks_designing_2025}:
\begin{itemize}
\item They distinguish between errors based on their effect on
the measurements, not based on their location in the circuit.
This allows for merging equivalent errors, which decreases
decoding complexity.
\item Errors on the data qubits and on the measurements are
treated in a unified manner. This leads to a more powerful
description of the overall circuit.
\end{itemize}
In this work, we only consider the process of decoding under the
\ac{dem} framework.
% Core idea
To achieve fault tolerance, the goal we strive towards is to
consider the internal errors in addition to the input errors during
the decoding process.
The core idea behind detector error models is to do this by defining
a new \emph{circuit code} that describes the circuit.
Each \ac{vn} of this new code corresponds to an error location in the
circuit and each corresponds to a \ac{cn} to a syndrome measurement.
This circuit code, combined with the prior probabilities of error
given by the noise model, incorporates all information necessary for decoding.
%%%%%%%%%%%%%%%%
\subsection{Measurement Syndrome Matrix}
\label{subsec:Measurement Syndrome Matrix}
@@ -441,56 +441,118 @@ given by the noise model, incorporates all information necessary for decoding.
% Mathematical definition
We describe the circuit code using the \emph{measurement syndrome
matrix} matrix $\bm{\Omega} \in \mathbb{F}_2^{m\times N}$, with
matrix} $\bm{\Omega} \in \mathbb{F}_2^{M\times N}$, with
\begin{align*}
\Omega_{j,i} =
\Omega_{\ell,i} =
\begin{cases}
1, & \text{Error $i$ flips measurement $j$}\\
1, & \text{Error $i$ flips measurement $\ell$}\\
0, & \text{otherwise}
\end{cases}
.%
,%
\end{align*}
This matrix thus defines the code based on which error mechanism
flips which measurement, rather than the Pauli type and location of
each error \cite[Sec.~1.4.3]{higgott_practical_2024}.
where $M \in \mathbb{N}$ is the number of measurements.
To obtain $\bm{\Omega}$, we must propagate Pauli errors through the
circuit, tracking which measurements they affect
\cite[Sec.~2.4]{derks_designing_2025}.
% Example
% TODO: Fix syndrome dimension notation
We turn to our example of the three-qubit repetition code to
illustrate the construction of the measurement syndrome matrix.
We begin by replicating the syndrome extraction circuitry, three
times in this case, as can be seen in
\autoref{fig:rep_code_multiple_rounds_bit_flip}.
We consider only bit flip noise at this stage.
For each syndrome extraction round we get an additional set of
syndrome measurements.
We combine these measurements by stacking them in a new vector $\bm{s}
\in \mathbb{F}_2^{n_\text{rounds}\cdot(n-k)}$.
To accomodate the additional syndrome bits, we extend the
matrix $\bm{\Omega}$ representing the circuit by replicating the rows as well.
We begin by extending our check matrix in \Cref{eq:rep_code_H}
to represent three rounds of syndrome extraction.
Each round yields an additional set of syndrome bits,
and we combine them by stacking them in a new vector
$\bm{s} \in \mathbb{F}_2^{R(n-k)}$.
We thus have to replicate the rows of $\bm{\Omega}$, once for each
additional syndrome measurement, to obtain
\begin{align*}
\bm{\Omega} =
\begin{pmatrix}
1 & 1 & 0 \\
0 & 1 & 1 \\
1 & 1 & 0 \\
0 & 1 & 1 \\
1 & 1 & 0 \\
0 & 1 & 1 \\
\end{pmatrix}
.%
\end{align*}
\Cref{fig:rep_code_multiple_rounds_bit_flip}
depicts the corresponding circuit.
Note that we have not yet introduced error locations in the syndrome
extraction circuitry, so we still consider only bit flip noise at this stage.
Recall that $\bm{\Omega}$ describes which \ac{vn} is connected to
which parity check and the syndrome indicates which parity checks
are violated.
This means that if an error exists at only a single \ac{vn}, we can
read off the syndrome in the corresponding column.
If errors occur at multiple locations, the resulting syndrome will be
the linear combination of the respective columns.
We thus have
\begin{align*}
\bm{s} \in \text{span} \{\bm{\Omega}\}
.%
\end{align*}
% Expand to phenomenological
We now whish to expand the error model to phenomenological noise, though
We now wish to expand the error model to phenomenological noise, though
only considering $X$ errors in this case.
We introduce new error locations at the respective positions,
We introduce new error locations at the appropriate positions,
arriving at the circuit depicted in
\autoref{fig:rep_code_multiple_rounds_phenomenological}.
\Cref{fig:rep_code_multiple_rounds_phenomenological}.
For each additional error location, we extend $\bm{\Omega}$ by
appending the corresponding syndrome vector as a column.
\begin{gather}
\label{eq:syndrome_matrix_ex}
\bm{\Omega} =
\left(
\begin{array}{ccccccccccccccc}
1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0
& 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0
& 0 & 0 & 0 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0
& 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1
& 0 & 0 & 0 & 0 & 0 \\
1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0
& 1 & 1 & 0 & 1 & 0 \\
0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0
& 0 & 1 & 1 & 0 & 1
\end{array}
\right) . \\[-6mm]
\hspace*{-58.7mm}
\underbrace{
\phantom{
\begin{array}{ccc}
0 & 0 & 0
\end{array}
}
}_\text{Original matrix}
\end{gather}
Notice that the first three columns correspond to the original
measurement syndrome matrix, as these columns correspond to the error
locations on the data qubits.
In this example, all measurements we considered were syndrome measurements.
Assuming no errors, the results of those measurements were
deterministic, irrespective of the actual logical state
$\ket{\psi}_\text{L}$, as they only depend on whether
$\ket{\psi}_\text{L} \in \mathcal{C}$, not on the concrete state.
It is, in general, possible to also consider non-deterministic measurements.
As an example, it is common to consider a round of noiseless
measurements of the actual data qubit states after the last syndrome
extraction round.
\begin{figure}[t]
\centering
\newcommand{\preperr}[1]{
\gate[style={fill=blue!20}]{\scriptstyle #1}
}
\begin{minipage}{0.3\textwidth}
\centering
\begin{tikzpicture}
@@ -540,14 +602,10 @@ appending the corresponding syndrome vector as a column.
\end{gather*}
\end{minipage}
\newcommand{\preperr}[1]{
\gate[style={fill=blue!20}]{\scriptstyle #1}
}
\vspace*{5mm}
\begin{quantikz}[
row sep=4mm, column sep=4mm,
row sep=4mm, column sep=3.4mm,
wire types={q,q,q,q,q,n,n,n,n},
execute at end picture={
\draw [
@@ -694,96 +752,142 @@ appending the corresponding syndrome vector as a column.
\end{figure}
%%%%%%%%%%%%%%%%
\subsection{Detector Error Matrix}
\label{subsec:Detector Error Matrix}
\subsection{Detector Matrix}
\label{subsec:Detector Matrix}
% Core idea
% TODO: Make this a proper definition?
Instead of using the measurements as parity indicators directly, we
may wish to combine them in some way.
We call such combinations \emph{detectors}.
Formally, a detector is a parity constraint on a set of measurement
outcomes \cite[Def.~2.1]{derks_designing_2025}.
Changing the perspective in this way does not alter the theoretical
error correcting capabilities of the circuit, but it may change the
decoding performance when using a practical decoder.
\red{[Possibly a few more words on this (maybe a mathematical
proof/intuition?)]}
Instead of using stabilizer measurement results directly, we
generalize the notion of what constitutes a parity check slightly.
We formally define a \emph{detector} as a deterministic parity constraint on
a set of measurement outcomes \cite[Def.~2.1]{derks_designing_2025}.
In the most straightforward case, we may simply use the stabilizer
measurements as detectors.
We immediately recognize that we will have as many linearly
independent detectors as there are separate deterministic measurements.
We generally aim to utilize the maximum number of linearly
independent detectors \cite[Sec.~2.2]{derks_designing_2025}.
% The detector matrix
% TODO: Fix the notation mess
We describe the relationship between measurements and detectors using
the \emph{detector matrix} $\bm{D} \in \mathbb{F}_2^{d\times m}$
\cite[Def.~2.2]{derks_designing_2025}.
Similar to the way a \ac{pcm} connects bits with parity checks, the
the \emph{detector matrix} $\bm{D} \in \mathbb{F}_2^{D\times M}$
\cite[Def.~2.2]{derks_designing_2025}, with $D \in \mathbb{N}$
denoting the number of detectors.
Similar to the way a \ac{pcm} associates bits with parity checks, the
detector matrix links measurements and detectors.
Each column corresponds to a measurement, while the rows correspond
to the detectors.
Each column corresponds to a measurement, while each row corresponds
to a detector.
We should note at this point that the combination of measurements
into detectors has no bearing on the actual construction of the
syndrome extraction circuitry.
It is something that happens ``virtually'' after the fact and only
affects the decoder.
Note that we can use the detector matrix $\bm{D}$ to describe the set
of possible measurement outcomes under the absence of noise.
The same way we use a \ac{pcm} to describe the code space as
\begin{align*}
\mathcal{C}
= \{ \bm{x} \in \mathbb{F}_2^{n} : \bm{H}\bm{x}^\text{T} = \bm{0} \}
,%
\end{align*}
the set of possible measurement outcomes is simply $\text{kern}\{\bm{D}\}$
\cite[Sec.~2.2]{derks_designing_2025}.
%%%%%%%%%%%%%%%%
\subsection{Detector Error Matrix}
\label{subsec:Detector Error Matrix}
% The detector error matrix
We now know how the errors at different locations in the circuit
affect the measurements ($\bm{\Omega}$), and we know how the
measurements relate to the detectors ($\bm{D}$).
affect the measurements (through $\bm{\Omega}$), and we know how the
measurements relate to the detectors (through $\bm{D}$).
For decoding, we are interested in the effect of the errors on the
detectors directly.
We thus construct the \emph{detector error matrix} $\bm{H} \in
\mathbb{F}_2^{d\times N}$ \cite[Def.~2.9]{derks_designing_2025} as
\mathbb{F}_2^{D\times N}$ \cite[Def.~2.9]{derks_designing_2025} as
\begin{align*}
\bm{H} := \bm{D}\bm{\Omega}
.%
\end{align*}
Note that, in particular when $D=M$, this is equivalent to performing row
additions on the matrix $\bm{\Omega}$.
% There are multiple ways of choosing the detectors
There is a degree of freedom in how we choose the detectors, which is
reflected in the fact that we can construct multiple different
detector matrices $\bm{D}$ from the same circuit.
For two detector matrices $\bm{D}_1$ and $\bm{D}_2$, as long as
\begin{gather}
\label{eq:kern_condition}
\text{kern}\{\bm{D}_1\} = \text{kern}\{\bm{D}_2\}
\end{gather}
they describe the same set of possible measurement outcomes (under
the absence of noise) and thus the same circuit.
In fact, as long as \Cref{eq:kern_condition} holds, the detector
error matrices constructed from them can distinguish between the
same pairs of error sets \cite[Lemma~6]{derks_designing_2025}.
To see this, we note that we can distinguish between two circuit
error vectors $\bm{e}_1$ and $\bm{e}_2$ as long as they do not
violate the same set of detectors, i.e.,
\begin{align*}
\hspace{-15mm}
% tex-fmt: off
&& \bm{H} \bm{e}_1^\text{T} & \neq \bm{H} \bm{e}_2^\text{T} \\
\iff \hspace{-33mm} && \bm{H} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \neq 0 \\
\iff \hspace{-33mm} && \bm{D} \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \neq 0 \\
\iff \hspace{-33mm} && \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \notin \text{kern} \{\bm{D}\}
% tex-fmt: on
.%
\end{align*}
We conclude that altering our perspective by choosing a different
detector matrix
does not modify the error correcting capabilities of the code.
It may, however, change the decoding performance when using a practical decoder.
% How to choose the detectors
% TODO: Give results from current and previous stage mathematical names
We still have a degree of freedom in how we choose the detectors.
\ldots
What constitutes a good set of detectors is difficult to assess
without performing explicit decoding simulations, since it ultimately
depends on the decoder employed.
For iterative decoders, high sparsity is generally beneficial, but
finding detectors that maximize sparsity is an NP-complete problem
\cite[Sec.~2.6]{derks_designing_2025}.
There is, however, one way of defining the detectors that will prove useful
at a later stage.
To the measurement results from each syndrome extraction round, we
To the measurement results from each syndrome extraction round we
can add the results from the previous round, as illustrated in
\autoref{fig:detectors_from_measurements_general}.
\red{[Mathematical notation for measurement combination]}
\Cref{fig:detectors_from_measurements_general}.
We thus have $D=n-k$.
Concretely, we denote the outcome of
measurement $\ell \in \{1,\ldots,n-k\}$ in round $r \in \{1,\ldots,R\}$ by
$m_\ell^{(r)} \in \mathbb{F}_2$
and define
\begin{gather*}
\bm{m}^{(r)} :=
\begin{pmatrix}
m_1^{(r)} \\
\vdots \\
m_{n-k}^{(r)}
\end{pmatrix}
.%
\end{gather*}
Similarly, we denote the outcome of detector $j\in\{1,\ldots,D\}$ in
round $r$ by $d_j^{(r)} \in \mathbb{F}_2$ and define
\begin{gather}
\label{eq:measurement_combination}
\bm{d}^{(r)} =
\begin{pmatrix}
d_1^{(r)} \\
\vdots \\
d_D^{(r)}
\end{pmatrix}
:= \bm{m}^{(r)} + \bm{m}^{(r-1)}
,%
\end{gather}
with $\bm{m}^{(0)} = \bm{0}$.
We again turn our attention to the three-qubit repetition code.
In \autoref{fig:rep_code_multiple_rounds_phenomenological} we can see
that $E_6$ has occurred and has subsequently tripped the last four measurements.
We now take those measurements and combine them according to
\red{[Reference mathematical notation above]}.
We can see this process graphically in
\autoref{fig:detectors_from_measurements_rep_code}.
To understand why this way of defining the detectors is useful, we
note that the error $E_6$ in
\autoref{fig:rep_code_multiple_rounds_phenomenological} has not only
tripped the measurements in the syndrome extraction round immediately
afterwards, but all subsequent ones as well.
To only see errors in the rounds immediately following them, we
consider our newly defined detectors instead of the measurements,
which effectively compute the difference between the measurements.
Each error can only trip syndrome bits that follow it.
We can see this in the triangular structure of $\bm{\Omega}$ in
\autoref{fig:rep_code_multiple_rounds_phenomenological}.
Combining the measurements into detectors according to
\red{[Reference mathematical notation above]}, we are performing row
additions in such a way as to clear the bottom left of the matrix.
This yields a block-diagonal structure for the detector error matrix
$\bm{H}$, as in the example in
\autoref{fig:detectors_from_measurements_rep_code}.
Note that we exploit the fact that each syndrome measurement round is
identical to obtain this structure.
% TODO: Change notation (\bm{D})
\begin{figure}[t]
\centering
@@ -801,7 +905,7 @@ identical to obtain this structure.
& \wire[d][3]{c} & & \wire[d][1]{c} & & \wire[d][1]{c} & & \wire[d][1]{c} & \\
& \ctrl[wire=c]{0}\wire[r][1]{c} & \wire[d][1]{c} & \ctrl[vertical wire=c]{1}\wire[r][1]{c} & \wire[d][1]{c} & \ctrl[vertical wire=c]{1}\wire[r][1]{c} & \wire[d][1]{c} & \ctrl[vertical wire=c]{1}\wire[r][1]{c} & \\
& & \wire[r][1]{c} & \targ{}\wire[d][1]{c} & \wire[r][1]{c} & \targ{}\wire[d][1]{c} & \wire[r][1]{c} & \targ{}\wire[d][1]{c} & \\
& \gate[1]{\bm{D}_1} & & \gate[1]{\bm{D}_2} & & \gate[1]{\bm{D}_3} & & \gate[1]{\bm{D}_4} & \\
& \gate[1]{\bm{d}^{(1)}} & & \gate[1]{\bm{d}^{(2)}} & & \gate[1]{\bm{d}^{(3)}} & & \gate[1]{\bm{d}^{(4)}} & \\
\end{quantikz}
% tex-fmt: on
@@ -809,11 +913,49 @@ identical to obtain this structure.
\label{fig:detectors_from_measurements_general}
\end{figure}
We again turn our attention to the three-qubit repetition code.
In \Cref{fig:rep_code_multiple_rounds_phenomenological} we can see
that $E_6$ has occurred and has subsequently tripped the last four measurements.
We now take those measurements and combine them according to
\Cref{eq:measurement_combination}.
We can see this process graphically in
\Cref{fig:detectors_from_measurements_rep_code}.
To understand why this way of defining the detectors is useful, we
note that the error $E_6$ in
\Cref{fig:rep_code_multiple_rounds_phenomenological} has not only
tripped the measurements in the syndrome extraction round immediately
afterwards, but all subsequent ones as well.
To only see errors in the rounds immediately following them, we
consider our newly defined detectors instead of the measurements,
which effectively compute the difference between the measurements.
Each error can only trip syndrome bits that follow it.
This is reflected in the triangular structure of $\bm{\Omega}$ in
\Cref{eq:syndrome_matrix_ex}.
Combining the measurements into detectors according to
\Cref{eq:measurement_combination}, we are effectively performing
row additions in such a way as to clear the bottom left of the matrix.
The detector error matrix
\begin{align*}
\bm{H} =
\left(
\begin{array}{ccccccccccccccc}
1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 1
\end{array}
\right)
\end{align*}
we obtain this way has a block-diagonal structure.
Note that we exploit the fact that each syndrome measurement round is
identical to obtain this structure.
\begin{figure}[t]
\centering
\hspace*{-5mm}
\begin{minipage}{0.42\textwidth}
\newcommand{\redwire}[1]{
\wire[r][#1][style={draw=red, line width=1.5pt, double}]{q}
}
@@ -841,24 +983,6 @@ identical to obtain this structure.
& \redmeter{}\inwire\redwire{3} & & & \redtarg\wire[r][3]{c} & & & \gate{D_6}
\end{quantikz}
% tex-fmt: on
\end{minipage}%
\begin{minipage}{0.56\textwidth}
\newcommand\cc{\cellcolor{orange!20}}
\begin{align*}
\bm{H} =
% tex-fmt: off
\left(\begin{array}{ccccccccccccccc}
1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
\cc{0} & \cc{0} & \cc{0} & \cc{1} & \cc{0} & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
\cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{1} & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
\cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{1} & \cc{0} & 1 & 1 & 0 & 1 & 0 \\
\cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{0} & \cc{1} & 0 & 1 & 1 & 0 & 1
\end{array}\right)
% tex-fmt: on
\end{align*}
\end{minipage}
\caption{Construction of detectors from the measurements of a
three-qubit repetition code.}
@@ -870,18 +994,27 @@ identical to obtain this structure.
\label{subsec:Detector Error Models}
A \emph{detector error model} is the combination of the detector
error matrix $\bm{H}$ and the noise model $\bm{p}$.
\content{Combination of detector error matrix and noise model}
\content{Contains all information necessary for decoding
\cite[Intro.]{derks_designing_2025}}
\content{Not only useful for decoding, but also for ... (Derks et al.)}
error matrix $\bm{H}$ and the noise model $\bm{p}$
\cite[Sec.~6]{derks_designing_2025}.
It serves as an abstract representation of a circuit and can be used
both to transfer information to a decoder and to aid in the
design of fault-tolerant systems.
E.g., it can be used to investigate the properties of a circuit with
respect to fault tolerance.
It contains all information necessary for the decoding process.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Practical Considerations}
\label{sec:Practical Considerations}
% Practical simulation aspects
The previous sections give an overview over available noise models
and the function of \acp{dem}.
In order to successfully apply these concepts in practice, we must
consider a few further aspects.
%%%%%%%%%%%%%%%%
\subsection{Choice of Noise Model}
\label{subsec:Choice of Noise Model}
While these types of noise models give us some constraints on the
types and locations of errors, the question of how exactly to choose
@@ -896,28 +1029,100 @@ For circuit-level noise, various options exist, such as the \emph{SI1000}
measurements) models \cite[Sec.~2.1]{gidney_fault-tolerant_2021}.
These differ in the way they compute individual error probabilities
from the physical error rate.
In this work we only consider \emph{standard circuit-based depolarizing
noise}, as this is the standard approach in the literature.
We thus set the error probabilities of all error locations in the
circuit-level noise model to the same value, the physical error rate $p$.
\content{Intro}
circuit-level noise model to the same value, the physical error rate
$p_\text{phys}$.
%%%%%%%%%%%%%%%%
\subsection{Practical Methodology}
\label{subsec:Practical Methodology}
\subsection{Per-Round Logical Error Rate}
\label{subsec:Per-Round Logical Error Rate}
\content{Per-round-LER explanation}
% Per-round LER
Another aspect that is important to consider is the meaning of the
\ac{ler} in the context of a \ac{qec} system with multiple
rounds of syndrome measurements.
In order to facilitate the comparability of results obtained from
simulations with different numbers of syndrome extraction rounds, we
use the \emph{per-round-\ac{ler}}.
The simplest way of calculating the per-round \ac{ler} is by modeling
each round as an independent experiment.
For each experiment, an error might occur with a certain probability
$p_\text{e,round}$.
The overall probability of error is then
\begin{align}
\hspace{-12mm}
p_\text{e,total} &= 1 - (1 - p_\text{e,round})^{R} \nonumber\\
\label{eq:per_round_ler}
\implies \hspace{3mm} p_\text{e,round} &=
1 - (1 - p_\text{e,total})^{1 / R}
.%
\hspace{12mm}
\end{align}
We approximate $p_\text{e,total}$ using a Monte Carlo simulation and
compute the per-round-\ac{ler} using \Cref{eq:per_round_ler}.
This is a common approach taken in the literature
\cite{gong_toward_2024}\cite{wang_fully_2025}.
Another common approach \cite{chen_exponential_2021}%
\cite{bausch_learning_2024}\cite{beni_tesseract_2025} is to assume an
exponential decay for the decoder's \emph{logical fidelity}
\cite[Eq.~2]{bausch_learning_2024}
\begin{align*}
F_\text{total} = (F_\text{round})^{R}
.%
\end{align*}
The logical fidelity is a measure of the quality of a logical state
\cite[Appendix~E]{postler_demonstration_2024}.
As it is related to the error rate through $F = 1 - 2p$, we obtain
\cite[Eq.~4]{bausch_learning_2024}
\begin{align}
(1 - 2p_\text{e,total}) &= (1 - 2p_\text{e,round})^{R} \nonumber\\
\implies \hspace{15mm} p_\text{e,round} &= \frac{1}{2}
\left[ 1 - (1 - 2p_\text{e,total})^{1/R} \right]
.%
\end{align}
We have chosen to use the first approach, i.e.,
\Cref{eq:per_round_ler}, as the related literature is closer in
topic to our own work.
%%%%%%%%%%%%%%%%
\subsection{Stim}
\label{subsec:Stim}
\content{Circuit code heavily depends on the exact circuit construction}
\content{Not easy to predict how errors at different locations
propagate through the circuit and what detectors they affect}
It is not immediately apparent from a code's \ac{pcm} how the
\ac{dem} will look, because this heavily depends on the exact circuit
construction and choice of noise model.
As we noted in \Cref{subsec:Measurement Syndrome Matrix}, we can
obtain a measurement syndrome matrix by propagating Pauli frames
through the circuit.
The standard choice of simulation tool used for this purpose is
\emph{stim}%
\footnote{https://github.com/quantumlib/Stim}
\cite{gidney_stim_2021}, which is available as a Python 3
pypi package.
In fact, it was in this tool that the concept of the \ac{dem} was
first introduced.
\content{Stim is a software package that generates DEMs from circuits}
\content{The user still has to define the circuit themselves, and
especially the detectors \cite[Sec~2.5]{derks_designing_2025}}
One capability of stim, and \acp{dem} in general, that we didn't go
into detail about in this chapter is the merging of error mechanisms.
Since \acp{dem} differentiate errors based on their effect on the
measurements and not on their Pauli type and location
\cite[Sec.~1.4.3]{higgott_practical_2024}, it is natural to group
errors that have the same effect.
This slightly lowers the computational complexity of decoding, as the
number of resulting \acp{vn} is reduced.
While stim is a useful tool for circuit simulation, it doesn't
include many utilities for building syndrome extraction circuitry automatically.
The user has to define most, if not all, of the circuit manually,
depending on the code in question.
This is somewhat natural, as stim is meant first and foremost as a
simulator, and circuit generation is contingent upon the \ac{qec}
scheme in question.

View File

@@ -1,19 +1,317 @@
% TODO: Make all [H] -> [t]
\chapter{Decoding under Detector Error Models}
% Intro
In \Cref{ch:Fundamentals} we introduced the fundamentals of classical
error correction, before moving on to quantum information science and
finally combining the two in \acf{qec}.
In \Cref{ch:Fault tolerance} we then turned to fault-tolerance, with
a focus on a specific way of implementing it, called \acfp{dem}.
In this chapter, we move on from the fundamental concepts and examine
how to apply them in practice.
Specifically, we concern ourselves with the practical aspects of decoding
under \acp{dem}.
\content{Intro}
We investigate decoding \acf{qldpc} codes under \acp{dem} in particular.
We focus on \ac{qldpc} codes, as they have emerged as leading
candidates for practical quantum error correction, offering the
ability to encode more logical qubits per physical qubit than surface
codes while maintaining favorable threshold properties
\cite[Sec.~1]{bravyi_high-threshold_2024}.
Because of this, the decoding algorithms we consider will all be
related to \acf{bp} in some way.
Our aim is to build a fault-tolerant \ac{qec} system that works well
even under consideration of circuit-level noise.
We must overcome two main challenges to achieve this.
First, recall the problems related to degeneracy, which is inherent
to quantum codes.
Because multiple minimum-weight codewords exist, the \ac{bp}
algorithm becomes uncertain of the direction to proceed in.
Additionally, the commutativity conditions of the stabilizers
necessitate the existence of short cycles.
These two aspects together lead to substantial convergence problems
of \ac{bp} for quantum codes, when it is used on its own.
Second, the consideration of circuit-level noise introduces many more
error locations into the circuit.
Using \acp{dem}, we construct a new circuit code and model each of
these error locations as a new \acf{vn}.
We also perform multiple rounds of syndrome measurements,
exacerbating the problem.
This leads to a massively increased computational complexity and
latency of the decoding process.
In our experiments using the $\llbracket 144,12,12 \rrbracket$
\acf{bb} code with $12$ syndrome measurement rounds, for example, the
number of \acp{vn} was increased from $144$ to $9504$, and the
number of \acfp{cn} was increased from $72$ to $1008$.
The first problem is not inherent to \acp{dem} or fault-tolerance,
but rather quantum codes in general.
Many different approaches to solving it exist, usually centered
around somehow modifying \ac{bp}.
The most popular approach is combining a few initial
iterations of \ac{bp} with a second decoding algorithm, \ac{osd}
\cite{roffe_decoding_2020}.
Other approaches exist, such as \ac{aed}
\cite{koutsioumpas_automorphism_2025}, where multiple variations of
the code are decoded simultaneously to increase the chances of convergence.
Here, we will focus on the \acf{bpgd} algorithm
\cite{yao_belief_2024} we already introduced in \Cref{ch:Fundamentals},
for reasons that will become clear later in the chapter.
The second problem is inherent to decoding using \acp{dem}.
This is an area that has been less studied.
As we saw in \Cref{sec:Quantum Error Correction}, for \ac{qec},
latency is the main constraint, not raw computational complexity.
The main way this is addressed in the literature is \emph{sliding
window decoding}, which attempts to divide the overall decoding
problem into many smaller ones that can be solved more efficiently.
% TODO: This could potentially be a bit more text (e.g., go into
% SC-LDPC like structure that serves as the inspiration for the
% warm-start decoding. Or just go into warm-start decoding)
We will start by briefly reviewing the existing work related to
sliding-window decoding,
before focusing on one specific incarnation.
We will then introduce a modification to the existing algorithm and
perform numerical simulations to evaluate it.
% and reducing latency is the main goal of the existing literature.
% This is generally done using windowing approaches; either
% sliding-window based, where the latency is reduced due an earlier
% start to the decoding process \cite{kuo_fault-tolerant_2024}%
% \cite{huang_improved_2023}\cite{huang_increasing_2024}\cite{gong_toward_2024},
% or by decoding multiple windows in parallel
% \cite{skoric_parallel_2023}\cite{tan_scalable_2023}.
% This work is based on the sliding-window method.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Sliding-Window Decoding}
\label{sec:Sliding-Window Decoding}
% Intro
% Spacetime codes
\content{Callback to previous chapter}
\content{(Maybe even historical) overview of the literature}
\content{Better yet: A proper (at least as proper as possible) review}
\ac{qec} codes are often viewed through the lenses of the
\emph{space} and \emph{time} dimensions.
Both directions add redundancy, but they do so in a different way and
guard against different defects.
The space dimension corresponds to the redundancy added through the
code itself, while the time dimension corresponds to the repetition
of the syndrome measurements \cite[Sec.~IV.B]{dennis_topological_2002}.
% Basic idea
The idea of sliding-window decoding is to exploit the time-like
structure by splitting the circuit into overlapping windows along the
time dimension.
Each of these windows is then decoded separately.
%%%%%%%%%%%%%%%%
\subsection{Existing Literature}
\label{subsec:Existing Literature}
% Review of existing literature
Research on this topic has been ongoing for some time, though mostly
for topological codes.
The literature on \ac{qldpc} codes is more limited.
\Cref{fig:literature} gives an overview of the related body of work.
\red{
\begin{itemize}
\item \cite{huang_increasing_2024} use BP+OSD,
\cite{gong_toward_2024} use BP+GDG
\item \cite{huang_improved_2023} use phenomenological noise,
\cite{gong_toward_2024} circuit-level noise
\item Go into the way the parallel decoding approaches
consolidate the overlap regions
\item \cite{huang_improved_2023} use hypegraph and lifted
product codes, \cite{gong_toward_2024} use BB codes
\item \cite{kuo_fault-tolerant_2024} use toric codes, the
rest of the topological papers surface codes
\item \cite{dennis_topological_2002} call their scheme ``overlap-add''
\item QUITS views sliding-window decoding more separately
\item Reasons for latency improvement ()
\end{itemize}
}
\begin{figure}[H]
\centering
\tikzset{
literature/.append style={
minimum width=6mm,
minimum height=6mm,
text width=18mm,
align=left,
}
}
\tikzset{
heading/.append style={
draw=black,
minimum width=22mm,
minimum height=6mm,
align=left,
rounded corners = 1mm,
}
}
\begin{tikzpicture}[node distance = 0mm and 0mm]
% tex-fmt: off
\node[heading, minimum width=15mm, fill=gray!25] (code) {Code};
\node[heading, below right=1mm and -5mm of code, fill=orange!20] (top) {Topological};
\node[heading, below right=42mm and -5mm of code, fill=orange!20] (qldpc) {QLDPC};
\node[literature, below right=0mm and -12mm of top] (dennis) {\cite{dennis_topological_2002}};
\node[literature, below=of dennis] (tan) {\cite{tan_scalable_2023}};
\node[literature, below=of tan] (skoric) {\cite{skoric_parallel_2023}};
\node[literature, below=of skoric] (bombin) {\cite{bombin_modular_2023}};
\node[literature, below=of bombin] (kuo) {\cite{kuo_fault-tolerant_2024}};
\node[literature, below right=0mm and -12mm of qldpc] (huang) {\cite{huang_improved_2023},\cite{huang_increasing_2024}};
\node[literature, below=of huang] (gong) {\cite{gong_toward_2024}};
\coordinate (code-anchor) at ($(code.south) + (-2mm,0)$);
\coordinate (top-anchor) at ($(top.south) + (-5mm,0)$);
\coordinate (qldpc-anchor) at ($(qldpc.south) + (-5mm,0)$);
\draw (code-anchor) |- (top);
\draw (code-anchor) |- (qldpc);
\draw (top-anchor) |- (dennis);
\draw (top-anchor) |- (tan);
\draw (top-anchor) |- (skoric);
\draw (top-anchor) |- (bombin);
\draw (top-anchor) |- (kuo);
\draw (qldpc-anchor) |- (huang);
\draw (qldpc-anchor) |- (gong);
\draw [
line width=1pt,
decorate,
decoration={brace,amplitude=2mm,raise=5mm}
]
(dennis.north east) -- (dennis.south east)
node[midway,right,xshift=10mm]{Sequential};
\draw [
line width=1pt,
decorate,
decoration={brace,amplitude=2mm,raise=5mm}
]
(tan.north east) -- (kuo.south east)
node[midway,right,xshift=10mm]{Parallel};
\draw [
line width=1pt,
decorate,
decoration={brace,amplitude=2mm,raise=5mm}
]
(huang.north east) -- (gong.south east)
node[midway,right,xshift=10mm]{Sequential};
% tex-fmt: on
\end{tikzpicture}
\caption{Overview of literature on sliding-window decoding.}
\label{fig:literature}
\end{figure}
% \red{
% Existing work
% \begin{itemize}
% \item \cite{gong_toward_2024}
% \begin{itemize}
% \item BB codes (QLDPC)
% \item Circuit-level noise
% \item Sequential
% \item Cites $\underbrace{\cite{dennis_topological_2002}
% \cite{tan_scalable_2023}
% \cite{skoric_parallel_2023}}_\text{Surface code}
% \underbrace{\cite{huang_improved_2023}}_\text{QLDPC,Phenomenological}$
% \end{itemize}
% \item \cite{huang_improved_2023}
% \begin{itemize}
% \item Hypergraph product codes, Lifted product codes (QLDPC)
% \item Phenomenological noise
% \item Sequential
% \item Cites $\underbrace{\cite{dennis_topological_2002}
% [Huang, Brown, 2021]
% \cite{skoric_parallel_2023}
% \cite{tan_scalable_2023}
% \cite{bombin_modular_2023}}_\text{Surface code}$
% \end{itemize}
% \item \cite{dennis_topological_2002}
% \begin{itemize}
% \item Surface code (Topological)
% \item No idea what noise, don't care either (Gong et
% al. say circuit-level noise)
% \item ``Overlapping recovery'' -> Sequential
% \end{itemize}
% \item \cite{tan_scalable_2023}
% \begin{itemize}
% \item Surface code (Topological)
% \item Circuit-level noise
% \item Parallel
% \item Cites \cite{dennis_topological_2002}
% \end{itemize}
% \item \cite{skoric_parallel_2023}
% \begin{itemize}
% \item Surface code (Topological)
% \item Circuit-level noise
% \item Parallel
% \item Cites \cite{dennis_topological_2002}
% \end{itemize}
% \item \cite{huang_increasing_2024}
% \begin{itemize}
% \item Same as \cite{huang_improved_2023}
% \end{itemize}
% \item \cite{kuo_fault-tolerant_2024}
% \begin{itemize}
% \item Toric codes (Topological)
% \item Circuit-level noise
% \item Parallel
% \item Cites \cite{dennis_topological_2002}
% \cite{tan_scalable_2023}
% \cite{skoric_parallel_2023} \cite{gong_toward_2024}
% \end{itemize}
% \item \cite{bombin_modular_2023}
% \begin{itemize}
% \item Surface codes (Topological)
% \item No idea if it's even fault-tolerant
% \item Parallel
% \item Cites \cite{dennis_topological_2002}
% \cite{tan_scalable_2023}
% \cite{skoric_parallel_2023} \cite{leverrier_decoding_2022}
% \end{itemize}
% % This is not BP and not parallelization over the time dimension
% % \item \cite{leverrier_decoding_2022}
% % \begin{itemize}
% % \item Quantum tanner codes (QLDPC)
% % \item Parallel
% % \item No idea if it's even fault-tolerant
% % \item Cites [don't care]
% % \end{itemize}
% \item \cite{kang_quits_2025}
% \begin{itemize}
% \item Cites \cite{huang_increasing_2024} \ldots
% \end{itemize}
% \end{itemize}
% }
\content{Possibly go into the fact that current sliding-window
approaches don't differentiate clearly between the sliding-window
part and the decoder part. This work aims to extend the
sliding-window part in a general fashion that is compatible with many
different decoder parts. Combine this with QUITS modular structure
for sliding window decoding}
%%%%%%%%%%%%%%%%
\subsection{Implementation of Sliding-Window Decoding}
\label{subsec:Implementation of Sliding-Window Decoding}
We build on the approach taken by \cite{huang_increasing_2024} and
\cite{gong_toward_2024}.
% High-level overview of Sliding-Window decoding
@@ -62,7 +360,8 @@ with processing'' some VNs)}
\hspace*{-98mm}%
\begin{tikzpicture}
\draw[{Latex}-{Latex}, line width=.7pt] (0, -0.75mm) -- (0, 5mm);
\draw[line width=1pt] (-1mm,-0.75mm) -- (3mm,-0.75mm);
\draw[line width=1pt] (-1mm,-0.75mm) --
(3mm,-0.75mm);
\draw[line width=1pt] (-1mm,5mm) -- (3mm,5mm);
\node[left] at (-2mm,2.125mm) {$\sim W$};
@@ -507,7 +806,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \W/\col/\mark in
{3/KITred/triangle*,4/KITblue/diamond*,5/KITorange/square*} {
\edef\temp{\noexpand
\addplot+[mark=\mark, solid, mark options={fill=\col}, \col]
\addplot+[mark=\mark, solid, mark
options={fill=\col}, \col]
table[
col sep=comma, x=physical_p,
y=LER_per_round,
@@ -587,7 +887,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \W/\col/\mark in
{3/KITred/triangle*,4/KITblue/diamond*,5/KITorange/square*} {
\edef\temp{\noexpand
\addplot+[mark=\mark, solid, mark options={fill=\col}, \col]
\addplot+[mark=\mark, solid, mark
options={fill=\col}, \col]
table[
col sep=comma, x=physical_p,
y=LER_per_round,
@@ -669,7 +970,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \F/\col/\mark in
{3/KITred/triangle*,2/KITblue/diamond*,1/KITorange/square*} {
\edef\temp{\noexpand
\addplot+[mark=\mark, solid, mark options={fill=\col}, \col]
\addplot+[mark=\mark, solid, mark
options={fill=\col}, \col]
table[
col sep=comma, x=physical_p,
y=LER_per_round,
@@ -736,7 +1038,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \W/\col/\mark in
{3/KITred/triangle,4/KITblue/diamond,5/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -806,7 +1109,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \F/\col/\mark in
{3/KITred/triangle,2/KITblue/diamond,1/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -843,7 +1147,8 @@ standard circuit-based depolarizing noise model, etc.)}
\ac{bb} code
under circuit-level noise.
$12$ rounds of syndrome extraction were performed and
standard circuit-based depolarizing noise was chosen as the noise model.
standard circuit-based depolarizing noise was chosen as the
noise model.
The physical error probability was fixed at $0.0025$.
}
\end{figure}
@@ -1001,7 +1306,8 @@ standard circuit-based depolarizing noise model, etc.)}
included both the messages on the Tanner graph and decimation
information.
$12$ rounds of syndrome extraction were performed and
standard circuit-based depolarizing noise was chosen as the noise model.
standard circuit-based depolarizing noise was chosen as the
noise model.
}
\end{figure}
@@ -1046,7 +1352,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \W/\col/\mark in
{3/KITred/triangle,4/KITblue/diamond,5/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -1116,7 +1423,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \F/\col/\mark in
{3/KITred/triangle,2/KITblue/diamond,1/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -1160,7 +1468,8 @@ standard circuit-based depolarizing noise model, etc.)}
The information used for the warm-start initialization
included only the messages on the Tanner graph.
$12$ rounds of syndrome extraction were performed and
standard circuit-based depolarizing noise was chosen as the noise model.
standard circuit-based depolarizing noise was chosen as the
noise model.
The physical error probability was fixed at $0.0025$.
}
\end{figure}
@@ -1314,7 +1623,8 @@ standard circuit-based depolarizing noise model, etc.)}
The information used for the warm-start initialization
included only the messages on the Tanner graph.
$12$ rounds of syndrome extraction were performed and
standard circuit-based depolarizing noise was chosen as the noise model.
standard circuit-based depolarizing noise was chosen as the
noise model.
}
\end{figure}
@@ -1359,7 +1669,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \W/\col/\mark in
{3/KITred/triangle,4/KITblue/diamond,5/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -1429,7 +1740,8 @@ standard circuit-based depolarizing noise model, etc.)}
\foreach \F/\col/\mark in
{3/KITred/triangle,2/KITblue/diamond,1/KITorange/square} {
\edef\temp{\noexpand
\addplot+[mark=\mark, densely dashed, forget plot, \col]
\addplot+[mark=\mark, densely dashed,
forget plot, \col]
table[
col sep=comma, x=max_iter,
y=LER_per_round,
@@ -1473,8 +1785,10 @@ standard circuit-based depolarizing noise model, etc.)}
The information used for the warm-start initialization
included only the messages on the Tanner graph.
$12$ rounds of syndrome extraction were performed and
standard circuit-based depolarizing noise was chosen as the noise model.
standard circuit-based depolarizing noise was chosen as the
noise model.
The physical error probability was fixed at $0.0025$.
}
\end{figure}

View File

@@ -1 +1,5 @@
\chapter{Conclusion and Outlook}
\content{\textbf{Ideas for further research}}
\content{Softer way of decimating VNs}

View File

@@ -1,2 +1,5 @@
# Post-process bibliography.bib (e.g. after a reference-manager export):
# replace raw UTF-8 accented characters in author names with brace-wrapped
# LaTeX accent commands so the file stays classic-BibTeX compatible.
# The quadruple backslash survives two unescaping passes (shell double
# quotes, then sed's replacement text), leaving a single \' in the output.
sed -i "s/Świerkowska/{\\\\'S}wierkowska/" bibliography.bib
sed -i "s/Héctor/H{\\\\'e}ctor/" bibliography.bib
sed -i "s/Bombín/Bomb{\\\\'i}n/" bibliography.bib
sed -i "s/Zémor/Z{\\\\'e}mor/" bibliography.bib
# Strip noisy auto-exported fields (abstract, note, urldate, url, keywords,
# file) in one pass.  -z reads the whole file as one NUL-separated record so
# the pattern can consume the trailing \n of each field line; -E enables
# extended regexes.  The (\{[^}]*\}[^}]*)* part tolerates one level of
# nested braces inside the field value — deeper nesting would not match;
# NOTE(review): verify no stripped field uses two-level brace nesting.
sed -Ezi "s/\s(abstract|note|urldate|url|keywords|file) = \{[^}]*(\{[^}]*\}[^}]*)*\},?\n//g" bibliography.bib

View File

@@ -27,6 +27,7 @@
\usepackage[noEnd=false]{algpseudocodex}
\usepackage{nicematrix}
\usepackage{colortbl}
\usepackage{cleveref}
\usetikzlibrary{calc, positioning, arrows, fit}
\usetikzlibrary{external}
@@ -38,6 +39,11 @@
\setcounter{MaxMatrixCols}{20}
\Crefname{equation}{}{}
\Crefname{section}{Section}{Sections}
\Crefname{subsection}{Subsection}{Subsections}
\Crefname{figure}{Figure}{Figures}
%
%
% Custom commands
@@ -45,7 +51,7 @@
%
\newcommand{\red}[1]{\textcolor{red}{#1}}
\newcommand{\content}[1]{\noindent\indent\red{[#1]}\\}
\newcommand{\content}[1]{\noindent\indent\red{[#1]\\}}
\newcommand{\figwidth}{10cm}
\newcommand{\figheight}{7.5cm}