44 Commits

Author SHA1 Message Date
4250f2a903 Add sliding-window decoding slide 2026-04-15 18:40:29 +02:00
e940e7ab9f Copy midterm presentation to final presentation 2026-04-15 13:54:21 +02:00
82f27fbede Modify .tmux_session.sh 2026-04-14 13:26:41 +02:00
a7785f6c75 Remove TODOs 2026-04-10 10:51:39 +02:00
9edd80cf28 LLM review 2026-04-10 09:05:24 +02:00
fc9dcbe11e Finish first draft of classical fundamentals 2026-04-10 08:45:36 +02:00
5c4bad30e2 Add SC-LDPC Tanner Graph, fix qualitative LDPC plot, add decoding paragraph 2026-04-08 23:58:34 +02:00
1d822dac8b Enable externalization; Finish writing SC-LDPC section; Add (unfinished) Tanner graph figure 2026-04-07 18:35:57 +02:00
7fc66a2c68 [results] Add LER/p plots for n_iter_sw = 200, n_iter_whole=1536 2026-04-02 09:10:32 +02:00
9ee3eb64e9 Fix typo: 2->1 2026-04-01 18:00:54 +02:00
2246915be9 Add whole v. windowed plot for constant num. of total iterations 2026-04-01 17:51:24 +02:00
5480f2ed7b Fix plotting W instead of F in figure 2026-04-01 15:18:54 +02:00
f21563251b Add soft v. hard over max num iterations plots 2026-04-01 15:16:35 +02:00
4ae8f66603 [thesis] Add BP text and error floor figure 2026-03-30 23:19:11 +02:00
1e3af6c69e [results] Plot simulation results for max_bp_iter=200 2026-03-30 22:34:57 +02:00
942f33582f [thesis] Top-align figures; add content outline; write sc intro 2026-03-30 17:49:43 +02:00
9f3bef606a Change results structure 2026-03-30 10:53:31 +02:00
3dd0863aae Make plots show per-round-LER; Add windowed soft info decoding vs whole decoding comparison 2026-03-30 10:49:20 +02:00
b60b1a2aed Add param-exploration.tex 2026-03-30 03:56:17 +02:00
7db31aec85 Add qec meeting results 2026-03-28 10:35:43 +01:00
631eeed5cd Add windowed vs whole plot 2026-03-27 11:20:50 +01:00
733577fbfb Add resources 2026-03-27 00:29:33 +01:00
6e819d650f Add outline and main content points for QEC Meeting presentation 2026-03-27 00:29:06 +01:00
1aa45bd741 Update bibliography, phrasing, add outlines for sections 2026-03-27 00:15:39 +01:00
168688e9a0 Add qec-meeting-presentation outline 2026-03-26 10:51:08 +01:00
0949255d78 Add latex-common as submodule 2026-03-26 10:50:54 +01:00
12dc737537 Fix build system 2026-03-24 16:24:04 +01:00
082666a8e2 Write ldpc section 2026-03-24 16:23:42 +01:00
1de3eb10fc Change cel slides submodule url 2026-03-23 01:06:07 +01:00
a3191fa0b7 Change phrasing 2026-03-23 01:00:00 +01:00
4c778e7bc6 Add cel slides template to TEXINPUTS in Makefile 2026-03-23 00:59:35 +01:00
fd0a354cf9 Minor changes 2026-03-23 00:58:52 +01:00
3b9f108ee5 Fix gitmodules url 2026-03-23 00:57:23 +01:00
993148d902 Check out newest version of thesis template 2026-03-23 00:57:06 +01:00
c3b0c194fe Make current thesis text use CEL template 2026-03-22 22:57:35 +01:00
7703fa4023 Add cel thesis template to lib/ as submodule 2026-03-22 22:21:06 +01:00
29968c8c4d Finish fist draft of binary linear block codes 2026-03-22 22:11:35 +01:00
e5dc0bc074 Add .tmux_session.sh 2026-03-21 18:58:32 +01:00
361e572a1b Add outline and first page of content for thesis 2026-03-21 18:57:32 +01:00
d8ca717021 Final corrections 2026-02-05 13:02:24 +01:00
44ffee48fd Remove empty slide; Fix TOC 2026-02-05 04:52:36 +01:00
723cfe438a Add graphics 2026-02-05 04:47:28 +01:00
ada33d4af1 Finish backup slide 2026-02-05 04:47:12 +01:00
54c08c5bc3 Finish performance evaluation and conclusion slides 2026-02-05 03:26:54 +01:00
42 changed files with 8929 additions and 64 deletions

8
.gitmodules vendored
View File

@@ -1,3 +1,9 @@
[submodule "lib/cel-slides-template-2025"] [submodule "lib/cel-slides-template-2025"]
path = lib/cel-slides-template-2025 path = lib/cel-slides-template-2025
url = git@gitlab.kit.edu:kit/cel/misc/cel-slides-template-2025.git url = ssh://git@100.123.176.93:2222/an.tsouchlos/cel-slides-template-2025.git
[submodule "lib/cel-thesis"]
path = lib/cel-thesis
url = ssh://git@100.123.176.93:2222/an.tsouchlos/cel-thesis.git
[submodule "lib/latex-common"]
path = lib/latex-common
url = ssh://git@100.123.176.93:2222/an.tsouchlos/latex-common.git

8
.tmux_session.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/bash
SESSION=$1
tmux send-keys -t "$SESSION:1" "./.setup_local_env.sh" Enter
# tmux send-keys -t "$SESSION:1" "export TEXINPUTS=./lib/cel-slides-template-2025:\$TEXINPUTS" C-m
tmux send-keys -t "$SESSION:1" "trap './.clean_local_env.sh' EXIT" Enter
tmux send-keys -t "$SESSION:1" "nvim" Enter
tmux send-keys -t "$SESSION:1" "\\ll" Enter

View File

@@ -1,11 +1,13 @@
DOCUMENTS := $(patsubst src/%/main.tex,build/%.pdf,$(wildcard src/*/main.tex)) DOCUMENTS := build/midterm_presentation.pdf build/thesis.pdf
# DOCUMENTS := build/thesis.pdf
.PHONY: all .PHONY: all
all: $(DOCUMENTS) all: $(DOCUMENTS)
build/%.pdf: src/%/main.tex build/prepared build/%.pdf: src/%/main.tex build/prepared
latexmk $< TEXINPUTS=./lib/cel-slides-template-2025:$(dir $<):$$TEXINPUTS \
mv build/main.pdf $@ latexmk -outdir=build/$* $<
mv build/thesis/main.pdf $@
build/prepared: build/prepared:
mkdir -p build mkdir -p build

1
lib/cel-thesis Submodule

Submodule lib/cel-thesis added at f783ba56a1

1
lib/latex-common Submodule

Submodule lib/latex-common added at bded242752

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,195 @@
import warnings
from typing import Sequence
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as pt
from scipy.sparse import csc_matrix
from quits.decoder import spacetime
from quits.decoder import detector_error_model_to_matrix
from quits.qldpc_code import BbCode
from quits import ErrorModel, CircuitBuildOptions
def build_bb_circuit(N: int, num_rounds: int, p: float):
    """Construct a bivariate-bicycle (BB) code and its syndrome-extraction circuit.

    Parameters
    ----------
    N : supported BB code block length (one of 72, 90, 108, 144, 288, 360, 756).
    num_rounds : number of syndrome-extraction rounds in the circuit.
    p : uniform error probability applied to all four error locations of the
        error model.

    Returns
    -------
    (code, circuit) : the BbCode instance and the Z-basis circuit built from it.

    Raises
    ------
    AssertionError : if N is not a supported block length.
    """
    # Lookup table of (l, m, A_x_pows, A_y_pows, B_x_pows, B_y_pows) per N.
    # fmt: off
    BB_PARAMS = {
        72:  (6,  6,  [3], [1, 2],   [1, 2],   [3]),
        90:  (15, 3,  [9], [1, 2],   [2, 7],   [0]),
        108: (9,  6,  [3], [1, 2],   [1, 2],   [3]),
        144: (12, 6,  [3], [1, 2],   [1, 2],   [3]),
        288: (12, 12, [3], [2, 7],   [1, 2],   [3]),
        360: (30, 6,  [9], [1, 2],   [25, 26], [3]),
        756: (21, 18, [3], [10, 17], [3, 19],  [5]),
    }
    # fmt: on
    assert N in BB_PARAMS, "Unsupported code size"
    l, m, ax_pows, ay_pows, bx_pows, by_pows = BB_PARAMS[N]
    code = BbCode(
        l=l, m=m,
        A_x_pows=ax_pows, A_y_pows=ay_pows,
        B_x_pows=bx_pows, B_y_pows=by_pows,
    )
    # seed=1 keeps circuit construction deterministic across runs.
    circuit = code.build_circuit(
        error_model=ErrorModel(p, p, p, p),
        num_rounds=num_rounds,
        basis="Z",
        circuit_build_options=CircuitBuildOptions(),
        seed=1,
    )
    return code, circuit
def compute_num_windows(num_rounds: int, W: int, F: int):
    """
    This was extracted from the function `sliding_window_circuit_mem()` of
    `quits.decoder`.

    Returns the total number of decoding windows (the sliding windows plus
    the final whole-history window) for `num_rounds` syndrome-extraction
    rounds, window size `W`, and window shift `F`.
    """
    slack = 2 + num_rounds - W
    if slack < 0:
        # Window covers the entire history: a single correction step.
        warnings.warn(
            "Window size larger than the syndrome extraction rounds: Doing"
            " whole history correction"
        )
        return 1
    # Ceil-divide the remaining rounds by the shift F: one extra window is
    # needed whenever the shift does not divide the slack evenly.
    full_shifts, remainder = divmod(slack, F)
    num_cor_rounds = full_shifts + (1 if remainder else 0)
    # +1 accounts for the final window itself.
    return num_cor_rounds + 1
def get_overlap_info(
col_start_indices: Sequence, W: int, F: int, m: int, win_check_set: Sequence
):
def i_B(k: int):
return col_start_indices[k]
def i_E(k: int):
return i_B(k) + win_check_set[k].shape[1]
def j_B(k: int):
return F * (k) * m
def j_E(k: int):
return (F * k + W) * m
num_windows = len(win_check_set)
overlap_begin_positions = []
for k in range(num_windows - 1):
overlap_begin_positions.append((j_B(k + 1) - j_B(k), i_B(k + 1) - i_B(k)))
overlap_end_positions = []
for k in range(1, num_windows):
overlap_end_positions.append((j_E(k - 1) - j_B(k), i_E(k - 1) - i_B(k)))
return overlap_begin_positions, overlap_end_positions
def reconstruct_window_start_col_indices(win_observable_set: Sequence):
    """
    This function effectively just reconstructs the `col_min` values of each
    window, from the `spacetime()` function of `quits.decoder`.

    Each window's starting column is the running sum of the widths of all
    preceding windows' observable matrices. Note: an empty input still yields
    `[0]`, mirroring the original `spacetime()` bookkeeping.
    """
    col_mins = [0]
    running_width = 0
    # The last window's width never contributes to any start index.
    for block in win_observable_set[:-1]:
        running_width += block.shape[1]
        col_mins.append(running_width)
    return col_mins
# ---- Experiment parameters ----
num_rounds = 12  # number of syndrome-extraction rounds
N = 72  # BB code block length (must be one supported by build_bb_circuit)
p = 0.005  # uniform physical error probability
W = 5  # sliding-window size, in rounds
F = 3  # window shift per decoding step, in rounds
#
# Get detector error matrix and split it into windows
#
code, circuit = build_bb_circuit(N, num_rounds, p)
model = circuit.detector_error_model(decompose_errors=False)
check_matrix, observable_matrix, priors = detector_error_model_to_matrix(model)
num_windows = compute_num_windows(num_rounds, W, F)
# spacetime() partitions the space-time detector error matrix into
# overlapping decoding windows (see quits.decoder).
win_check_set, win_observable_set, win_priors_set, win_update = spacetime(
circuit, code.hz, W, F, num_windows - 1
)
col_start_indices = reconstruct_window_start_col_indices(win_observable_set)
#
# Paint rectangles
#
# One distinct color per window, cycled if there are more windows than colors.
custom_colors = [
(162/255, 34/255, 35/255),
(223/255, 155/255, 27/255),
(70/255, 100/255, 170/255),
(163/255, 16/255, 124/255),
]
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
# spy() shows the sparsity pattern of the full detector error matrix.
ax.spy(check_matrix.toarray())
colors = [custom_colors[i % len(custom_colors)] for i in range(num_windows)]
# m = checks per round; presumably rows per round of the space-time matrix —
# TODO(review): confirm against quits.decoder.spacetime.
m = code.hz.shape[0]
# The blocks below draw the per-window rectangles and their overlap shading;
# currently disabled so the saved figure shows the bare matrix only.
# for win_idx in range(num_windows):
# col_start_idx = col_start_indices[win_idx]
# row_start_idx = win_idx * F * m
#
# ax.add_patch(
# pt.Rectangle(
# (col_start_idx, row_start_idx),
# win_check_set[win_idx].shape[1],
# win_check_set[win_idx].shape[0],
# fc="none",
# ec=colors[win_idx],
# )
# )
# overlap_begin_positions, overlap_end_positions = get_overlap_info(
# col_start_indices, W, F, m, win_check_set
# )
# for k in range(len(win_check_set) - 1):
# ax.add_patch(
# pt.Rectangle(
# (
# overlap_begin_positions[k][1] + col_start_indices[k],
# overlap_begin_positions[k][0] + F * k * m,
# ),
# win_check_set[k].shape[1] - overlap_begin_positions[k][1],
# win_check_set[k].shape[0] - overlap_begin_positions[k][0],
# fc=colors[k],
# ec=colors[k],
# alpha=0.3,
# )
# )
# Strip tick labels: only the sparsity structure matters for the figure.
ax.set_xticks([])
ax.set_yticks([])
fig.savefig('72_bb_dem_no_windows.pdf', bbox_inches='tight')
plt.show()

File diff suppressed because it is too large Load Diff

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 515 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 229 KiB

Binary file not shown.

View File

@@ -64,8 +64,7 @@
\Ac{qec} is a field of research combining quantum mechanics and \Ac{qec} is a field of research combining quantum mechanics and
``classical'' communications engineering. ``classical'' communications engineering.
This chapter provides the relevant theoretical background on both of This chapter provides the relevant theoretical background on both of
these topics and subsequently, building on top of this, introduces the these topics and subsequently introduces the the fundamentals of \ac{qec}.
the fundamentals of \ac{qec}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% TODO: Is Quantum Information Theory the correct title here? Would someth % TODO: Is Quantum Information Theory the correct title here? Would someth

View File

@@ -53,7 +53,7 @@
\title{Fault Tolerant Quantum Error Correction} \title{Fault Tolerant Quantum Error Correction}
\subtitle{Master's Thesis Midterm Presentation} \subtitle{Master's Thesis Midterm Presentation}
\author[Andreas]{Andreas Tsouchlos} \author[ Andreas]{Andreas Tsouchlos}
\date[]{} \date[]{}
\DeclareFieldFormat{note}{} \DeclareFieldFormat{note}{}
@@ -62,7 +62,7 @@
\DeclareFieldFormat{doi}{} \DeclareFieldFormat{doi}{}
\DeclareFieldFormat[article,book,inproceedings]{urldate}{} \DeclareFieldFormat[article,book,inproceedings]{urldate}{}
\addbibresource{MA.bib} \addbibresource{src/midterm_presentation/MA.bib}
% %
% %
@@ -148,7 +148,7 @@
\DeclareAcronym{qldpc}{ \DeclareAcronym{qldpc}{
short=QLDPC, short=QLDPC,
long=quantum low - density parity - check, long=quantum low-density parity-check,
} }
\DeclareAcronym{scldpc}{ \DeclareAcronym{scldpc}{
@@ -156,6 +156,11 @@
long=spatially-coupled low-density parity-check long=spatially-coupled low-density parity-check
} }
\DeclareAcronym{ler}{
short=LER,
long=logical error rate,
}
% %
% %
% Document body % Document body
@@ -401,7 +406,7 @@
\citereferencemanual{NC10} \citereferencemanual{NC10}
} }
\visible<3>{ \visible<3>{
\item Superposition $\rightarrow$ multiple solutions to the \item Superposition $\rightarrow$ Multiple solutions to the
decoding problem decoding problem
(\schlagwort{quantum degeneracy}) (\schlagwort{quantum degeneracy})
\citereferencemanual{RWB$^+$20}} \citereferencemanual{RWB$^+$20}}
@@ -520,7 +525,7 @@
\begin{subfigure}{0.15\textwidth} \begin{subfigure}{0.15\textwidth}
\centering \centering
\begin{align*} \begin{align*}
\bm{H} \bm{y}^\text{T} = \bm{s} \bm{H} \bm{y}^\text{T} = \bm{H} \bm{e}^\text{T} = \bm{s}
\end{align*} \end{align*}
\vspace*{-5mm} \vspace*{-5mm}
\end{subfigure} \end{subfigure}
@@ -1568,18 +1573,20 @@
\citereferencemanual{HP23} \citereferencemanual{HP23}
\citereferencemanual{GCR24} \citereferencemanual{GCR24}
\end{itemize} \end{itemize}
\item Degraded \ac{bp} performance addressed with \visible<2>{
modification or extension \item Degraded \ac{bp} performance addressed with
\begin{itemize} modification or extension
\item \Ac{osd} post-processing \begin{itemize}
\citereferencemanual{RWB$^+$20} \item \Ac{osd} post-processing
\item Guided decimation \citereferencemanual{GCR24} \citereferencemanual{RWB$^+$20}
\item Neural approaches \item Guided decimation \citereferencemanual{GCR24}
\citereferencemanual{KL22} \item Neural approaches
\citereferencemanual{MSL$^+$25} \citereferencemanual{KL22}
\item Ensemble decoding \citereferencemanual{MSL$^+$25}
\citereferencemanual{KSW$^+$25} \item Ensemble decoding
\end{itemize} \citereferencemanual{KSW$^+$25}
\end{itemize}
}
\end{itemize} \end{itemize}
\vspace*{15mm} \vspace*{15mm}
@@ -1737,7 +1744,7 @@
\begin{frame} \begin{frame}
\frametitle{Future Work} \frametitle{Future Work}
\vspace*{-12mm} \vspace*{-15mm}
\begin{itemize} \begin{itemize}
\item Completed work \item Completed work
@@ -1745,6 +1752,7 @@
\item Review literature on fault-tolerant \ac{qec} \item Review literature on fault-tolerant \ac{qec}
using \acp{dem} using \acp{dem}
\item Identify research gap \item Identify research gap
\item Familiarize with software toolboxes
\end{itemize} \end{itemize}
\vspace*{7mm} \vspace*{7mm}
\item Research gap \item Research gap
@@ -1787,32 +1795,166 @@
% TODO: Organize sections properly % TODO: Organize sections properly
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
\section{Simulation Methodology} \section{Remarks on Evaluation}
\label{sec:Simulation Methodology} \label{sec:Remarks on Evaluation}
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
\subsection{The Code and Other Parameters} \subsection{Figures of Merit}
\label{subsec:The Code and Other Parameters} \label{subsec:Figures of Merit}
\begin{frame} \begin{frame}
\frametitle{Noise Models and Figures of Merit} \frametitle{Performance Evaluation}
\begin{itemize} % - Gong et al. don't actually analyze the latency ->
\item \red{For circuit-level noise, often, all error probabilities % Benchmarking against other methods would be interesting
are set to the same value for simulations
\citereferencemanual{FSG09}}
\item \red{There are other approaches (e.g., SDMB noise, SI noise)
\citereferencemanual{DTB$^+$25}}
\end{itemize}
\vspace*{10mm} % \item For circuit-level noise, use same
% \schlagwort{physical error rate} for all error
% locations \citereferencemanual{FSG09}
\begin{itemize} \vspace*{-15mm}
\item \red{Footprint plots}
\item \red{Other figure of merit (Look into ECCentric?)}
\end{itemize}
\vspace*{15mm} \begin{minipage}{0.35\textwidth}
\only<1>{
\begin{itemize}
\item Independent variables
\begin{itemize}
\item Physical error rate
\item CNOT infidelity
\item Total qubit count
\item \ldots
\end{itemize}
\end{itemize}
}
\only<2->{
\begin{itemize}
\item Independent variables
\begin{itemize}
\item \textbf{Physical error rate}
\item CNOT infidelity
\item \textbf{Total qubit count}
\item \ldots
\end{itemize}
\end{itemize}
}
\end{minipage}%
\begin{minipage}{0.65\textwidth}
\begin{itemize}
\only<1>{
\item Noise models
\begin{itemize}
\item Standard circuit-based depolarizing noise
\citereferencemanual{FSG09}
\item Superconductor inspired (SI1000)
\citereferencemanual{GNF$^+$21}
\item Entangling Measurements (EM3)
\citereferencemanual{GNF$^+$21}
\item \ldots
\end{itemize}
}
\only<2->{
\item Noise models
\begin{itemize}
\item \textbf{Standard circuit-based depolarizing noise}
\citereferencemanual{FSG09}
\item Superconductor inspired (SI1000)
\citereferencemanual{GNF$^+$21}
\item Entangling Measurements (EM3)
\citereferencemanual{GNF$^+$21}
\item \ldots
\end{itemize}
}
\end{itemize}
\end{minipage}
\vspace{5mm}
\visible<3->{
\begin{itemize}
\item Degeneracy, information stored in correlations
$\rightarrow$ Consider \schlagwort{\acl{ler}} (LER)
\end{itemize}
}
\visible<4->{
\begin{itemize}
\item Types of benchmarking plots
\end{itemize}
\vspace*{5mm}
\begin{figure}[H]
\centering
\begin{subfigure}{0.35\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
domain=-5:5,
width=7cm,
height=5.5cm,
xticklabels=\empty,
yticklabels=\empty,
xlabel={Physical error rate},
xlabel style={yshift=5mm},
ylabel={LER},
ylabel style={yshift=-5mm},
grid,
]
\addplot+[
mark=none,
kit-red,
line width=2pt,
]
table[row sep=crcr] {
x y \\
1.134800559068837 0.5575221183357257 \\
2.0632737437615223 0.9764009116710485 \\
2.861072612292603 1.7787608707489788 \\
3.7551580964997053 2.8407080379684153 \\
4.264098875196703 3.513274267363004 \\
4.573589936760932 3.9911505302955272 \\
4.903713970055305 4.268436552233389 \\
};
\end{axis}
\end{tikzpicture}
\end{subfigure}%
\begin{subfigure}{0.35\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
domain=-5:5,
width=7cm,
height=5.5cm,
xticklabels=\empty,
yticklabels=\empty,
xlabel={Total qubit count},
xlabel style={yshift=5mm},
ylabel={LER},
ylabel style={yshift=-5mm},
grid,
]
\addplot+[
mark=none,
kit-blue,
line width=2pt,
]
table[row sep=crcr] {
x y \\
1.147643096789246 3.8430493581808607 \\
1.7245658892318043 2.762331811591747 \\
2.3573205843145306 2.3587443650766753 \\
2.9156332708646624 1.560537992857378 \\
3.6352360073136527 1.0403588210329737 \\
4.392060012189421 0.7130042787942606 \\
};
\end{axis}
\end{tikzpicture}
\end{subfigure}
\end{figure}
}
\vspace*{4mm}
\addreferencesmanual \addreferencesmanual
{FSG09}{ {FSG09}{
@@ -1820,30 +1962,44 @@
``High-threshold universal quantum computation on the surface ``High-threshold universal quantum computation on the surface
code,'' \emph{Physical Review}, 2009. code,'' \emph{Physical Review}, 2009.
} }
{DTB$^+$25}{ {GNF$^+$21}{
P.- J. H. S. Derks et al., ``Designing fault-tolerant C. Gidney et al., ``A Fault-Tolerant Honeycomb Memory'',
circuits using detector error models,'' \emph{Quantum}, 2025. \emph{Quantum}, 2021.
} }
\stopreferencesmanual \stopreferencesmanual
\end{frame} \end{frame}
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
\subsection{Proposed Methodology} \subsection{Conclusion and Outlook}
\label{subsec:Proposed Methodology} \label{subsec:Conclusion and Outlook}
\begin{frame} \begin{frame}
\frametitle{Conclusion} \frametitle{Conclusion and Outlook}
\vspace*{-15mm} \vspace*{-10mm}
\begin{minipage}[c]{0.65\textwidth} \begin{minipage}[c]{0.65\textwidth}
\begin{itemize} \begin{itemize}
\item \red{Noise model} \item Problem setting
\item \red{Memory or stability experiment} \begin{itemize}
\item \red{Figure of merit: Footprint plot} \item Research area: Decoder design for \acp{dem}
\item \red{Comparison with BB code also simulated by under circuit-level noise
\citereferencemanual{GCR24}} \item Research gap: Consideration of \acp{dem} as
\item \red{Comparison with surface code} \ac{scldpc} codes
\end{itemize}
\vspace*{5mm}
\item Future work
\begin{itemize}
\item Modify existing decoder to pass soft information
\item Test different \ac{bp} variations
\item \ldots
\end{itemize}
\vspace*{5mm}
\item Parameters
\begin{itemize}
\item Use standard depolarizing noise for comparability
\item Compare performance with other \ac{bb} code decoders
\end{itemize}
\end{itemize} \end{itemize}
\end{minipage}% \end{minipage}%
\begin{minipage}[c]{0.35\textwidth} \begin{minipage}[c]{0.35\textwidth}
@@ -1852,9 +2008,13 @@
\begin{figure}[H] \begin{figure}[H]
\centering \centering
\begin{tikzpicture}[every node/.style={scale=10}] \vspace*{-25mm}
\node at (0, 0) \begin{tikzpicture}
\node[scale=10] at (0, 0)
{\textcolor{kit-blue}{{\fontfamily{phv}\selectfont ?}}}; {\textcolor{kit-blue}{{\fontfamily{phv}\selectfont ?}}};
\node[align=center] at (0,-5) {Thank you for your
attention! \\ Any questions?};
\end{tikzpicture} \end{tikzpicture}
\end{figure} \end{figure}
\end{minipage} \end{minipage}
@@ -1868,7 +2028,7 @@
\begin{frame} \begin{frame}
\frametitle{System Level Overview} \frametitle{System Level Overview}
\vspace*{-15mm} \vspace*{-10mm}
\begin{figure}[H] \begin{figure}[H]
\centering \centering
@@ -1933,7 +2093,7 @@
\end{subfigure} \end{subfigure}
\end{figure} \end{figure}
% \vspace*{-2mm} \vspace*{5mm}
\addreferencesmanual \addreferencesmanual
{ZZC$^+$23}{ {ZZC$^+$23}{
@@ -1952,11 +2112,30 @@
\begin{frame} \begin{frame}
\frametitle{Guided Decimation Guessing Decoding} \frametitle{Guided Decimation Guessing Decoding}
\begin{itemize} \begin{minipage}{0.57\textwidth}
\item \red{Explain paper} \begin{itemize}
\end{itemize} \item BP guided decimation (BPGD) \\
$\rightarrow$ Iteratively fix most reliable variable node (VN)
\vspace*{10mm}
\item \schlagwort{Guided decimation guessing} (GDG)
\citereferencemanual{GCR24}
\begin{itemize}
\item Choose VN with \schlagwort{lowest}
log-likelihood ratio
\item Choose VN to fix based on \schlagwort{LLR history}
\item Explore both VN values in parallel
(\schlagwort{guessing})
\end{itemize}
\end{itemize}
\end{minipage}%
\begin{minipage}{0.43\textwidth}
\begin{figure}[H]
\centering
\includegraphics[scale=1.3]{res/gdg.pdf}
\end{figure}
\end{minipage}%
\vspace*{25mm} \vspace*{30mm}
\addreferencesmanual \addreferencesmanual
{GCR24}{ {GCR24}{
@@ -1967,6 +2146,52 @@
\stopreferencesmanual \stopreferencesmanual
\end{frame} \end{frame}
\begin{frame}
\frametitle{The Quantum Error Correcting Landscape}
\vspace*{-10mm}
\begin{itemize}
\item Taxonomy of main QEC code families \citereferencemanual{SPG$^+$25}
\end{itemize}
\vspace*{2mm}
\begin{figure}[H]
\centering
\includegraphics[scale=2.5]{res/taxonomy.pdf}
\end{figure}
\begin{itemize}
\item Surface code is the industry standard for
experimental implementations, but has poor encoding
efficiency \citereferencemanual{BCG$^+$24}
\item \Ac{qldpc} codes particularly interesting because of
\begin{itemize}
\item Constant overhead scaling \citereferencemanual{Got14}
\item Linear distance scaling \citereferencemanual{BCG$^+$24}
\end{itemize}
\end{itemize}
\vspace*{15mm}
\addreferencesmanual
{SPG$^+$25}{
A. Swierkowska et al., ``ECCentric: An Empirical
Analysis of Quantum Error Correction Codes'',
\emph{arXiv:2511.01062v1}, 2025.
}
{BCG$^+$24}{
S. Bravyi et al., ``High-threshold and low-overhead
fault-tolerant quantum memory,'' \emph{Nature}, 2024.
}
{Got14}{
D. Gottesman, ``Fault-Tolerant Quantum Computation with
Constant Overhead'', \emph{arXiv:1310.2984}, 2014.
}
\stopreferencesendmanual
\end{frame}
% TODO: Is this really necessary? % TODO: Is this really necessary?
% \begin{frame} % \begin{frame}
% \frametitle{The Quantum Error Correction Landscape} % \frametitle{The Quantum Error Correction Landscape}

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,417 @@
\documentclass[xcolor=dvipsnames,t,aspectratio=169]{beamer}
\setbeamersize{text margin left=1cm,text margin right=1cm,}
\usepackage{subcaption}
\usepackage{minted}
\usepackage[minted, most]{tcolorbox}
\usepackage{amsmath}
\usepackage{bm}
\usepackage[dvipsnames]{xcolor}
\usepackage{siunitx}
\usepackage{graphicx}
\usepackage{xfp}
\usepackage{quantikz}
\usepackage[T1]{fontenc}
\usepackage[scaled=.92]{helvet}
\usepackage{tikz}
\usetikzlibrary{positioning, arrows.meta, shapes.misc,
decorations.pathreplacing, calc, fit}
\usepackage{pgfplots}
\usepackage{pgffor}
\usepgfplotslibrary{groupplots}
% \usetikzlibrary{external}
% \tikzexternalize
%
%
% Set up the theme
%
%
% General and beamer options
\makeatletter
\addtobeamertemplate{author}{\centering}{}
\addtobeamertemplate{date}{\vspace*{-0.85cm}\centering}{}
\makeatother
\setbeamertemplate{enumerate items}[circle]
\setbeamertemplate{itemize items}[circle]
\setbeamercolor{title}{fg=black}
\setbeamercolor{frametitle}{fg=black}
\setbeamertemplate{footline}[frame number]
\setbeamertemplate{navigation symbols}{}
% \AtBeginSection[]{
% \begin{frame}
% \frametitle{Overview}
% \tableofcontents[currentsection]
% \end{frame}
% }
%
% \AtBeginSubsection[]{
% \begin{frame}
% \frametitle{Overview}
% \tableofcontents[currentsubsection]
% \end{frame}
% }
\fontfamily{cmss}\selectfont
\newcommand\wider[2][3em]{%
\makebox[\linewidth][c]{%
\begin{minipage}{\dimexpr\textwidth+#1\relax}
\raggedright#2
\end{minipage}%
}%
}
% Code listings
\usemintedstyle{gruvbox-dark}
\definecolor{gruvbox-bg}{HTML}{282828}
% PGF color scheme
\input{lib/latex-common/common.tex}
\pgfplotsset{colorscheme/rocket}
%
%
% Configure document
%
%
\title{Soft-Information Aware Sliding-Window Decoding}
\author{Andreas Tsouchlos}
\date{March 26, 2026}
%
%
% Custom commands
%
%
\newcommand{\red}[1]{\textcolor{red}{#1}}
\newcommand{\linkpython}[1]{
\scalebox{0.7}{
\begingroup
\setbox0=\hbox{\includegraphics[scale=0.18]{\res/python-logo.pdf}}%
\parbox{\wd0}{\box0}\endgroup
\hspace{1mm}
\colorbox{gruvbox-bg}{%
\textcolor{white}{%
\texttt{#1}%
}%
}%
}%
}
%
%
% Content
%
%
\begin{document}
\frame[plain]{\titlepage}
\setcounter{framenumber}{0}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Motivation and Core Idea}
\label{sec:Motivation and Core Idea}
\newsavebox{\innercircuit}
\begin{frame}
\frametitle{Sliding-Window Decoding for Fault-Tolerant QEC}
% \vspace*{-5mm}
\begin{figure}
\scalebox{0.8}{
\savebox{\innercircuit}{
\begin{tikzpicture}[
block/.style={draw, rectangle, minimum
height=1cm, minimum width=1.2cm, align=center,
fill=white, font=\small},
container/.style={draw, thick, inner sep=0.6cm}
]
% Internal blocks
\node (se1) [block]
{$\text{SE}_1$ \\ + noise};
\node (se2) [block, right=0.5cm of se1]
{$\text{SE}_2$ \\ + noise};
\node (dots) [right=0.3cm of se2] {\dots};
% Wires
\draw (se1.west) -- ++(-0.4,0);
\draw (se1.east) -- (se2.west);
\draw (se2.east) -- (dots.west);
\draw (dots.east) -- ++(0.4,0);
% The "Circuit" container
\node (box) [
container, fit=(se1) (se2) (dots),
label={[anchor=north west, xshift=2pt]
north west:Complete circuit}
] {};
\end{tikzpicture}
}
\begin{tikzpicture}[
every edge/.style = {draw, -{latex}}
]
\node (pcm) {Code~/~$\bm{H}_\text{PCM}$};
\node[
rectangle,
draw=black,
minimum width=2cm, minimum height=1cm,
right=of pcm,
align=center
] (sec) {Single\\ SE Circ.};
\node[right=of sec] (qc) {\usebox{\innercircuit}};
\node[right=of qc] (dem) {$\bm{H}_\text{DEM}$};
\draw (pcm.east) edge[bend left] (sec.west);
\draw (sec.east) edge[bend left] ($(qc.west) + (0.22,0)$);
\draw ($(qc.east) + (-0.22,0)$) edge[bend left]
node[midway, above] {Stim} (dem.west);
\end{tikzpicture}
}
\end{figure}
\only<1>{
\begin{figure}[H]
\centering
\includegraphics[scale=0.55]{./res/pcm.png}
\end{figure}
}
\only<2->{
\begin{figure}[H]
\centering
\includegraphics[scale=0.55]{./res/windowing.png}
\end{figure}
}
\vspace*{-5mm}
\visible<2->{
\begin{align*}
\colorbox{red!20}{$\bm{H}_1 \bm{e}_1 = \bm{s}_1$}
\hspace{2mm}\rightarrow\hspace{2mm} \hat{\bm{e_1}} = \cdots
\hspace{2mm}\rightarrow\hspace{2mm} \bm{s}_2' = \cdots \\
\colorbox{ForestGreen!20}{$\bm{H}_2 \bm{e}_2 = \bm{s}_2'$}
\hspace{2mm}\rightarrow\hspace{2mm} \hat{\bm{e_2}} = \cdots
\hspace{2mm}\rightarrow\hspace{2mm} \bm{s}_3' = \cdots \\
\colorbox{blue!20}{$\bm{H}_3 \bm{e}_3 = \bm{s}_3'$}
\hspace{2mm}\rightarrow\hspace{2mm} \hat{\bm{e_3}} = \cdots
\hspace{2mm}\rightarrow\hspace{2mm} \bm{s}_4' = \cdots
% &\hspace{2.25mm}\vdots
\end{align*}
}
\end{frame}
\begin{frame}
\frametitle{Similarities to Spatially-Coupled LDPC Codes}
\begin{figure}[H]
\centering
\includegraphics[scale=0.1]{./res/sc-tanner-graph.png}
\includegraphics[scale=0.1]{./res/sc-pcm.png}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Current Results}
\label{sec:Current Results}
\begin{frame}
\frametitle{An ``Upper Bound'' on the Performance Gain}
\begin{minipage}{0.65\textwidth}
\begin{figure}[H]
% \centering
\hspace*{-8mm}
\begin{tikzpicture}
\begin{axis}[
width=10cm,
height=8cm,
ymode=log,
ylabel={LER},
xlabel={Physical error rate},
legend pos=south east,
legend style={
cells={anchor=west},
cells={align=left},
},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xmin=0.001, xmax=0.004,
ymin=1e-2, ymax=1,
]
\addplot+[NavyBlue, mark=none, line width=1pt]
table[col sep=comma, x=p, y=LER_sw]
{res/whole_vs_windowed_spa.csv};
\addlegendentry{Windowed - SPA}
\addplot+[BurntOrange, mark=none, line width=1pt]
table[col sep=comma, x=p, y=LER_sw]
{res/whole_vs_windowed_min_sum.csv};
\addlegendentry{Windowed - Min-Sum}
\addplot+[NavyBlue, mark=none, line width=1pt,
densely dashed]
table[col sep=comma, x=p, y=LER_whole]
{res/whole_vs_windowed_spa.csv};
\addlegendentry{Whole - SPA}
\addplot+[BurntOrange, mark=none, line width=1pt,
densely dashed]
table[col sep=comma, x=p, y=LER_whole]
{res/whole_vs_windowed_min_sum.csv};
\addlegendentry{Whole - Min-Sum}
\end{axis}
\end{tikzpicture}
\end{figure}
\end{minipage}%
\begin{minipage}{0.35\textwidth}
\begin{itemize}
\item $[[144,12,12]]$ BB code
\item $n_\text{rounds} = 12$
\item $W=3$, $F=1$
\end{itemize}
\end{minipage}
\end{frame}
\begin{frame}
\frametitle{Soft- vs. Hard-Information Decoding Behavior I}
\begin{minipage}{0.65\textwidth}
\begin{figure}[H]
% \centering
\hspace*{-8mm}
\begin{tikzpicture}
\begin{axis}[
width=10cm,
height=8cm,
ymode=log,
ylabel={LER},
xlabel={Physical error rate},
legend pos=south east,
legend style={
cells={anchor=west},
cells={align=left},
},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xmin=0.001, xmax=0.004,
ymin=1e-2, ymax=1,
]
\addplot+[BurntOrange, mark=none,
line width=1pt]
table[col sep=comma, x=p, y=LER_soft]
{res/hard_vs_soft_spa.csv};
\addlegendentry{Soft info - SPA}
\addplot+[BurntOrange, densely dashed, mark=none,
line width=1pt]
table[col sep=comma, x=p, y=LER_hard]
{res/hard_vs_soft_spa.csv};
\addlegendentry{Hard info - SPA}
\addplot+[NavyBlue, mark=none, line width=1pt]
table[col sep=comma, x=p, y=LER_soft]
{res/hard_vs_soft_min_sum.csv};
\addlegendentry{Soft info - Min-Sum}
\addplot+[NavyBlue, densely dashed, mark=none,
line width=1pt]
table[col sep=comma, x=p, y=LER_hard]
{res/hard_vs_soft_min_sum.csv};
\addlegendentry{Hard info - Min-Sum}
\end{axis}
\end{tikzpicture}
\end{figure}
\end{minipage}%
\begin{minipage}{0.35\textwidth}
\begin{itemize}
\item $[[144,12,12]]$ BB code
\item $n_\text{rounds} = 12$
\item $W=3$, $F=1$
\end{itemize}
\end{minipage}
% \begin{itemize}
% \item \red{Soft vs hard for min sum and spa (4 plots)}
% \item \red{Different window sizes}
% \end{itemize}
\end{frame}
% \begin{frame}
% \frametitle{Soft- vs. Hard-Information Decoding Behavior II}
%
% \begin{itemize}
% \item \red{Comparison of numbers of iterations for soft vs hard}
% \item \red{Convergence analysis}
% \end{itemize}
% \end{frame}
%
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \section{Future Work and Open Questions}
% \label{sec:Future Work and Open Questions}
%
% \begin{frame}
% \frametitle{Future Work}
%
% \vspace*{15mm}
%
% \begin{itemize}
% \item Look at behavior during iterations
% \item Move from BP to BPGD
%
% \vspace{15mm}
%
% \item Q: Theoretically rigorous proof for the
% ``window-friendly'' structure of the detector error matrix?
% \item Q: Min-sum seems to perform better than SPA? \red{Make
% sure this question even makes sense}
% \end{itemize}
% \end{frame}
%
% % TODOs
% % - Whole vs windowed for min sum and spa
% % - Basic implementation
% % - Choose parameters (e.g., window sizes)
% % - soft vs hard for min sum and spa (4 plots)
% % - Basic implementation
% % - Choose parameters (e.g., window sizes)
% % - Comparison of numbers of iterations for soft vs hard
% % - Basic implementation (more or less done)
% % - Choose parameters
% % - Convergence analysis
% % - Basic implementation (more or less done)
% % - Choose parameters
% % - Make sure min sum vs spa question makes sense
% % - Think of a few words for each slide (and take notes)
\end{document}

View File

@@ -0,0 +1,8 @@
p,LER_hard,LER_soft
0.001,0.08,0.028
0.0015,0.174,0.076
0.002,0.348,0.218
0.0025,0.54,0.38
0.003,0.716,0.564
0.0035,0.862,0.75
0.004,0.948,0.906
1 p LER_hard LER_soft
2 0.001 0.08 0.028
3 0.0015 0.174 0.076
4 0.002 0.348 0.218
5 0.0025 0.54 0.38
6 0.003 0.716 0.564
7 0.0035 0.862 0.75
8 0.004 0.948 0.906

View File

@@ -0,0 +1,7 @@
p,LER_hard,LER_soft
0.001,0.098,0.038
0.0015,0.174,0.11
0.002,0.394,0.268
0.0025,0.536,0.376
0.003,0.688,0.574
0.0035,0.822,0.744
1 p LER_hard LER_soft
2 0.001 0.098 0.038
3 0.0015 0.174 0.11
4 0.002 0.394 0.268
5 0.0025 0.536 0.376
6 0.003 0.688 0.574
7 0.0035 0.822 0.744

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 146 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 340 KiB

View File

@@ -0,0 +1,8 @@
p,LER_whole,LER_sw
0.001,0.046,0.086
0.0015,0.11,0.2
0.002,0.185,0.345
0.0025,0.346,0.546
0.003,0.587,0.721
0.0035,0.732,0.852
0.004,0.871,0.941
1 p LER_whole LER_sw
2 0.001 0.046 0.086
3 0.0015 0.11 0.2
4 0.002 0.185 0.345
5 0.0025 0.346 0.546
6 0.003 0.587 0.721
7 0.0035 0.732 0.852
8 0.004 0.871 0.941

View File

@@ -0,0 +1,8 @@
p,LER_whole,LER_sw
0.001,0.072,0.101
0.0015,0.128,0.234
0.002,0.213,0.352
0.0025,0.374,0.517
0.003,0.544,0.676
0.0035,0.711,0.823
0.004,0.83,0.905
1 p LER_whole LER_sw
2 0.001 0.072 0.101
3 0.0015 0.128 0.234
4 0.002 0.213 0.352
5 0.0025 0.374 0.517
6 0.003 0.544 0.676
7 0.0035 0.711 0.823
8 0.004 0.83 0.905

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

1
src/results/2026-03-29/lib Symbolic link
View File

@@ -0,0 +1 @@
/home/andreas/workspace/private/ma-thesis/lib

View File

@@ -0,0 +1,641 @@
\documentclass{article}
\usepackage[a4paper,left=2cm,right=2cm,top=2.5cm,bottom=2cm]{geometry}
\usepackage{float}
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{amsfonts}
\usepackage{mleftright}
\usepackage{bm}
\usepackage{tikz}
\usepackage{subcaption}
\usepackage{xcolor}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\pgfplotsset{compat=newest}
\usepackage{acro}
\usepackage{braket}
\usepackage[
backend=biber,
style=ieee,
sorting=nty,
]{biblatex}
\usepackage{todonotes}
\usepackage{lipsum}
\usetikzlibrary{calc, positioning}
\input{lib/latex-common/common.tex}
\pgfplotsset{colorscheme/matplotlib}
%
%
% Custom commands
%
%
% \red{...}: render the argument in red; used to flag TODO/unverified statements.
\newcommand{\red}[1]{\textcolor{red}{#1}}
% Shared width/height for every pgfplots axis in this document,
% so all panels in the side-by-side subfigures line up.
\newcommand{\figwidth}{8cm}
\newcommand{\figheight}{6.5cm}
%
%
% Acronyms
%
%
% acro-package acronym: first use expands to "quantum error correction (QEC)",
% subsequent uses to "QEC".
\DeclareAcronym{qec}{
short=QEC,
long=quantum error correction
}
\addbibresource{src/proposal/MA.bib}
%
%
% Content
%
%
\title{Results: 2026-03-29}
\author{Andreas Tsouchlos}
\date{}
\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Whole vs Windowed decoding}
\label{sec:Whole vs Windowed decoding}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=1e-3, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~whole}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=1e-3, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~whole}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $F=1,
n_\text{iter,BP}=32,n_\text{rounds}=12$}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=1e-3, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~whole}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=1e-3, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~whole}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-29_23-39-18/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $W=5,
n_\text{iter,BP}=32,n_\text{rounds}=12$}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Soft- vs Hard- Information Decoding}
\label{sec:Soft- vs Hard- Information Decoding}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~hard}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~hard}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $F=1,
n_\text{iter,BP}=32,n_\text{rounds}=12$}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~hard}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~hard}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_00-06-26/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $W=5,
n_\text{iter,BP}=32,n_\text{rounds}=12$}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Soft-Information Decoding: Best Performance}
\label{sec:Soft-Information Decoding: Best Performance}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentryexpanded{Windowed, hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentryexpanded{Windowed, soft}
\addplot+[mark=o, line width=1pt, densely dashed, scol1]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=5/2026-03-29_23-39-18/LERs.csv};
\addlegendentryexpanded{Whole window}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentryexpanded{Windowed, hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentryexpanded{Windowed, soft}
\addplot+[mark=o, line width=1pt, densely dashed, scol1]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=5/2026-03-29_23-39-18/LERs.csv};
\addlegendentryexpanded{Whole window}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code,
$W=5, F=1, n_\text{iter,BP} = 32$}
\end{figure}
\end{document}

1
src/results/2026-03-29/src Symbolic link
View File

@@ -0,0 +1 @@
/home/andreas/workspace/private/ma-thesis/src

View File

@@ -0,0 +1,753 @@
\documentclass{article}
\usepackage[a4paper,left=2cm,right=2cm,top=2.5cm,bottom=2cm]{geometry}
\usepackage{float}
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{amsfonts}
\usepackage{mleftright}
\usepackage{bm}
\usepackage{tikz}
\usepackage{subcaption}
\usepackage{xcolor}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\pgfplotsset{compat=newest}
\usepackage{acro}
\usepackage{braket}
\usepackage[
backend=biber,
style=ieee,
sorting=nty,
]{biblatex}
\usepackage{todonotes}
\usepackage{lipsum}
\usetikzlibrary{calc, positioning}
\input{lib/latex-common/common.tex}
\pgfplotsset{colorscheme/matplotlib}
%
%
% Custom commands
%
%
% \red{...}: render the argument in red; used to flag TODO/unverified statements.
\newcommand{\red}[1]{\textcolor{red}{#1}}
% Shared width/height for every pgfplots axis in this document,
% so all panels in the side-by-side subfigures line up.
\newcommand{\figwidth}{8cm}
\newcommand{\figheight}{6.5cm}
%
%
% Acronyms
%
%
\DeclareAcronym{qec}{
short=QEC,
long=quantum error correction
}
\addbibresource{src/proposal/MA.bib}
%
%
% Content
%
%
\title{Results: 2026-03-30}
\author{Andreas Tsouchlos}
\date{}
\begin{document}
\maketitle
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \section{Soft vs Hard Information Decoding}
% \label{sec:Soft vs Hard Information Decoding}
%
% \begin{figure}[H]
% \centering
%
% \begin{subfigure}{0.5\textwidth}
% \begin{tikzpicture}
% \begin{axis}[
% width=\figwidth,
% height=\figheight,
% ymode=log,
% legend style={
% cells={anchor=west},
% cells={align=left},
% },
% enlargelimits=false,
% ymin=1e-3, ymax=2.5e-1,
% grid=both,
% legend pos = south west,
% xtick={8, 32, 64, 96, 128, 160, 192},
% xlabel={Max BP iterations},
% ylabel={Per-round-LER},
% ]
% \foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
% \edef\temp{\noexpand
% \addplot+[
% mark=o, line width=1pt, densely dashed, \col,
% ]
% table[
% col sep=comma, x=max_bp_iter,
% y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
% ]
% {/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_13-49-44/LERs.csv};
% }
% \temp
%
% \addlegendentryexpanded{$W = \W$~hard}
% }
%
% \foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
% \edef\temp{\noexpand
% \addplot+[mark=o, line width=1pt, \col]
% table[
% col sep=comma, x=max_bp_iter,
% y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
% ]
% {/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_13-49-44/LERs.csv};
% }
% \temp
%
% \addlegendentryexpanded{$W = \W$~soft}
% }
% \end{axis}
% \end{tikzpicture}
% \caption{Min-Sum}
% \end{subfigure}%
% \begin{subfigure}{0.5\textwidth}
% \begin{tikzpicture}
% \begin{axis}[
% width=\figwidth,
% height=\figheight,
% ymode=log,
% legend style={
% cells={anchor=west},
% cells={align=left},
% },
% enlargelimits=false,
% ymin=1e-3, ymax=2.5e-1,
% grid=both,
% legend pos = south west,
% xtick={8, 32, 64, 96, 128, 160, 192},
% xlabel={Max BP iterations},
% ylabel={Per-round-LER},
% ]
% \foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
% \edef\temp{\noexpand
% \addplot+[mark=o, line width=1pt, densely dashed, \col]
% table[
% col sep=comma, x=max_bp_iter,
% y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
% ]
% {/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_13-49-44/LERs.csv};
% }
% \temp
%
% \addlegendentryexpanded{$W = \W$~hard}
% }
%
% \foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
% \edef\temp{\noexpand
% \addplot+[mark=o, line width=1pt, \col]
% table[
% col sep=comma, x=max_bp_iter,
% y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
% ]
% {/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_13-49-44/LERs.csv};
% }
% \temp
%
% \addlegendentryexpanded{$W = \W$~soft}
% }
% \end{axis}
% \end{tikzpicture}
% \caption{SPA}
% \end{subfigure}
%
% \caption{BP simulations for the [[144,12,12]]-BB code with $F=1,
% n_\text{rounds}=12$, swept over the maximum number of BP iterations}
% \end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Whole vs Windowed decoding}
\label{sec:Whole vs Windowed decoding}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~whole}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~whole}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $F=1,
n_\text{iter,BP}=200,n_\text{rounds}=12$}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~whole}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~whole}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_sw})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~windowed}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $W=5,
n_\text{iter,BP}=200,n_\text{rounds}=12$}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Soft- vs Hard- Information Decoding}
\label{sec:Soft- vs Hard- Information Decoding}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~hard}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=4e-5, ymax=2e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~hard}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=\W/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $F=1,
n_\text{iter,BP}=200,n_\text{rounds}=12$}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~hard}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, densely dashed, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~hard}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=*, line width=1pt, \col]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=\F,+system.W=5/2026-03-30_08-57-21/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~soft}
}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code with $W=5,
n_\text{iter,BP}=200,n_\text{rounds}=12$}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Soft-Information Decoding: Best Performance}
\label{sec:Soft-Information Decoding: Best Performance}
\begin{figure}[H]
\centering
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Windowed, hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Windowed, soft}
\addplot+[mark=o, line width=1pt, densely dashed, scol1]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Whole window}
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=8e-4, ymax=2.5e-1,
grid=both,
legend pos = south east,
xtick={0.001,0.0015,...,0.004},
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
xlabel={Physical error rate},
ylabel={Per-round-LER},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Windowed, hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Windowed, soft}
\addplot+[mark=o, line width=1pt, densely dashed, scol1]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=windowed_vs_whole_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentryexpanded{Whole window}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}
\caption{BP simulations for the [[144,12,12]]-BB code,
$W=5, F=1, n_\text{iter,BP} = 200, n_\text{rounds}=12$}
\end{figure}
\end{document}

View File

@@ -0,0 +1,587 @@
\documentclass{article}
\usepackage[a4paper,left=2cm,right=2cm,top=2.5cm,bottom=2cm]{geometry}
\usepackage{float}
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{amsfonts}
\usepackage{mleftright}
\usepackage{bm}
\usepackage{tikz}
\usepackage{subcaption}
\usepackage{xcolor}
\usepackage{pgfplots}
\usepackage{pgfplotstable}
\pgfplotsset{compat=newest}
\usepackage{acro}
\usepackage{braket}
\usepackage[
backend=biber,
style=ieee,
sorting=nty,
]{biblatex}
\usepackage{todonotes}
\usepackage{lipsum}
\usetikzlibrary{calc, positioning}
\input{lib/latex-common/common.tex}
\pgfplotsset{colorscheme/matplotlib}
%
%
% Custom commands
%
%
\newcommand{\red}[1]{\textcolor{red}{#1}}
\newcommand{\figwidth}{7.5cm}
\newcommand{\figheight}{6.5cm}
%
%
% Acronyms
%
%
\DeclareAcronym{qec}{
short=QEC,
long=quantum error correction
}
\addbibresource{src/proposal/MA.bib}
%
%
% Content
%
%
\title{Results: 2026-04-01}
\author{Andreas Tsouchlos}
\date{}
\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Soft vs Hard Information Decoding}
\label{sec:Soft vs Hard Information Decoding}
\begin{figure}[H]
\centering
\begin{subfigure}{0.4\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
enlargelimits=false,
ymin=5e-3, ymax=2.5e-1,
grid=both,
xtick={8, 32, 64, 96, 128, 160, 192},
xlabel={Max BP iterations},
ylabel={Per-round-LER},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_23-30-38/LERs.csv};
}
\temp
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_23-30-38/LERs.csv};
}
\temp
}
\addplot+[mark=o, line width=1pt, black]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=5/2026-03-30_23-30-38/LERs.csv};
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.6\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=5e-3, ymax=2.5e-1,
grid=both,
legend pos = outer north east,
xtick={8, 32, 64, 96, 128, 160, 192},
xlabel={Max BP iterations},
ylabel={},
yticklabels = {},
]
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_23-30-38/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~hard}
}
\foreach \W/\col in {3/scol2,4/scol1,5/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=\W/2026-03-30_23-30-38/LERs.csv};
}
\temp
\addlegendentryexpanded{$W = \W$~soft}
}
\addplot+[mark=o, line width=1pt, black]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=5/2026-03-30_23-30-38/LERs.csv};
\addlegendentryexpanded{Whole}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}%
\caption{BP simulations for the [[144,12,12]]-BB code with
$n_\text{rounds}=12,p_\text{phys} = 0.0025,F=1$}
\end{figure}
\begin{figure}[H]
\centering
\begin{subfigure}{0.4\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
enlargelimits=false,
ymin=5e-3, ymax=2.5e-1,
grid=both,
xtick={8, 32, 64, 96, 128, 160, 192},
xlabel={Max BP iterations},
ylabel={Per-round-LER},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=\F,+system.W=5/2026-03-30_23-30-38/LERs.csv};
}
\temp
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=\F,+system.W=5/2026-03-30_23-30-38/LERs.csv};
}
\temp
}
\addplot+[mark=o, line width=1pt, black]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=5/2026-03-30_23-30-38/LERs.csv};
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.6\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=5e-3, ymax=2.5e-1,
grid=both,
legend pos = outer north east,
xtick={8, 32, 64, 96, 128, 160, 192},
xlabel={Max BP iterations},
ylabel={},
yticklabels = {},
]
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[
mark=o, line width=1pt, densely dashed, \col,
]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=\F,+system.W=5/2026-03-30_23-30-38/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~hard}
}
\foreach \F/\col in {1/scol2,2/scol1,3/scol0} {
\edef\temp{\noexpand
\addplot+[mark=o, line width=1pt, \col]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\noexpand\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=\F,+system.W=5/2026-03-30_23-30-38/LERs.csv};
}
\temp
\addlegendentryexpanded{$F = \F$~soft}
}
\addplot+[mark=o, line width=1pt, black]
table[
col sep=comma, x=max_bp_iter,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_over_iter_param_exploration,+system.F=1,+system.W=5/2026-03-30_23-30-38/LERs.csv};
\addlegendentryexpanded{Whole}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}%
\caption{BP simulations for the [[144,12,12]]-BB code with
$n_\text{rounds}=12,p_\text{phys} = 0.0025,W=5$}
\end{figure}
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Whole vs Windowed Decoding with Constant Total Number of Iterations}
\label{sec:Whole vs Windowed Decoding with Constant Total Number of Iterations}
%%%%%%%%%%%%%%%%
\subsection{32 Sliding-Window Iterations}
\label{subsec:32 Sliding-Window Iterations}
\begin{minipage}[t]{0.5\textwidth}
\begin{itemize}
\item Sliding-window decoding
\begin{itemize}
\item $n_\text{iter,BP} = 32$
\item $W=5, F=1 \Rightarrow n_\text{windows} = 8$
\item $n_\text{iter,BP}^\text{total} = n_\text{windows}
\cdot n_\text{iter,BP} = 256$
\end{itemize}
\end{itemize}
\end{minipage}%
\begin{minipage}[t]{0.5\textwidth}
\begin{itemize}
\item Whole decoding
\begin{itemize}
\item $n_\text{iter,BP} = 256$
\end{itemize}
\end{itemize}
\end{minipage}
\begin{figure}[H]
\centering
\begin{subfigure}{0.4\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
enlargelimits=false,
ymin=5e-5, ymax=2.5e-1,
grid=both,
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xlabel={Physical Error Rate},
ylabel={Per-round-LER},
]
\addplot+[
mark=o, line width=1pt, densely dashed, black,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=whole_more_iterations,+system.F=1,+system.W=3/2026-04-01_15-41-23/LERs.csv};
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.6\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=5e-5, ymax=2.5e-1,
grid=both,
legend pos = outer north east,
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xlabel={Physical Error Rate},
ylabel={},
yticklabels = {},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentry{Hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_00-06-26/LERs.csv};
\addlegendentry{Soft}
\addplot+[
mark=o, line width=1pt, densely dashed, black,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=whole_more_iterations,+system.F=1,+system.W=3/2026-04-01_15-41-24/LERs.csv};
\addlegendentryexpanded{Whole}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}%
\caption{BP simulations for the [[144,12,12]]-BB code with
$n_\text{rounds}=12, W=5, F=1$}
\end{figure}
%%%%%%%%%%%%%%%%
\subsection{200 Sliding-Window Iterations}
\label{subsec:200 Sliding-Window Iterations}
\begin{minipage}[t]{0.5\textwidth}
\begin{itemize}
\item Sliding-window decoding
\begin{itemize}
\item $n_\text{iter,BP} = 200$
\item $W=5, F=1 \Rightarrow n_\text{windows} = 8$
\item $n_\text{iter,BP}^\text{total} = n_\text{windows}
\cdot n_\text{iter,BP} = 1600$
\end{itemize}
\end{itemize}
\end{minipage}%
\begin{minipage}[t]{0.5\textwidth}
\begin{itemize}
\item Whole decoding
\begin{itemize}
\item $n_\text{iter,BP} = 1536$
\end{itemize}
\end{itemize}
\end{minipage}
\begin{figure}[H]
\centering
\begin{subfigure}{0.4\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
enlargelimits=false,
ymin=5e-5, ymax=2.5e-1,
grid=both,
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xlabel={Physical Error Rate},
ylabel={Per-round-LER},
]
\addplot+[
mark=o, line width=1pt, densely dashed, black,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=whole_more_iterations,+system.F=1,+system.W=5/2026-04-01_18-31-29/LERs.csv};
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=minimum_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\end{axis}
\end{tikzpicture}
\caption{Min-Sum}
\end{subfigure}%
\begin{subfigure}{0.6\textwidth}
\centering
\begin{tikzpicture}
\begin{axis}[
width=\figwidth,
height=\figheight,
ymode=log,
legend style={
cells={anchor=west},
cells={align=left},
},
enlargelimits=false,
ymin=5e-5, ymax=2.5e-1,
grid=both,
legend pos = outer north east,
xticklabel style={/pgf/number format/fixed},
xticklabel style={/pgf/number format/precision=4},
scaled x ticks=false,
grid=both,
xtick={0.001,0.0015,...,0.004},
xlabel={Physical Error Rate},
ylabel={},
yticklabels = {},
]
\addplot+[mark=o, line width=1pt, densely dashed, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_hard})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentry{Hard}
\addplot+[mark=*, line width=1pt, scol0]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_soft})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=soft_vs_hard_param_exploration,+system.F=1,+system.W=5/2026-03-30_08-57-21/LERs.csv};
\addlegendentry{Soft}
\addplot+[
mark=o, line width=1pt, densely dashed, black,
]
table[
col sep=comma, x=physical_p,
y expr={1 - (1-\thisrow{LER_whole})^(1/12)}
]
{/home/andreas/workspace/private/ma-sw-results/outputs/+decoder.bp_method=product_sum,+experiment=whole_more_iterations,+system.F=1,+system.W=3/2026-04-01_18-31-29/LERs.csv};
\addlegendentryexpanded{Whole}
\end{axis}
\end{tikzpicture}
\caption{SPA}
\end{subfigure}%
\caption{BP simulations for the [[144,12,12]]-BB code with
$n_\text{rounds}=12, W=5, F=1$}
\end{figure}
\end{document}

74
src/thesis/acronyms.tex Normal file
View File

@@ -0,0 +1,74 @@
\DeclareAcronym{qec}{
short=QEC,
long=quantum error correction
}
\DeclareAcronym{bp}{
short=BP,
long=belief propagation
}
\DeclareAcronym{nms}{
short=NMS,
long=normalized min-sum
}
\DeclareAcronym{spa}{
short=SPA,
long=sum-product algorithm
}
\DeclareAcronym{llr}{
short=LLR,
long=log-likelihood ratio
}
\DeclareAcronym{sc}{
short=SC,
long=spatially coupled
}
\DeclareAcronym{ldpc}{
short=LDPC,
long=low-density parity-check
}
\DeclareAcronym{ml}{
short=ML,
long=maximum likelihood
}
\DeclareAcronym{map}{
short=MAP,
long=maximum a posteriori
}
\DeclareAcronym{pcm}{
short=PCM,
long=parity-check matrix
}
\DeclareAcronym{vn}{
short=VN,
long=variable node
}
\DeclareAcronym{cn}{
short=CN,
long=check node
}
\DeclareAcronym{ber}{
short=BER,
long=bit error rate
}
\DeclareAcronym{fer}{
short=FER,
long=frame error rate
}
\DeclareAcronym{awgn}{
short=AWGN,
long=additive white Gaussian noise
}

1565
src/thesis/bibliography.bib Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1 @@
\chapter{Introduction}

View File

@@ -0,0 +1,762 @@
\chapter{Fundamentals}
\label{ch:Fundamentals}
\Ac{qec} is a field of research combining ``classical''
communications engineering and quantum information science.
This chapter provides the relevant theoretical background on both of
these topics and subsequently introduces the fundamentals of \ac{qec}.
% TODO: Is an explanation of BP with guided decimation needed in this chapter?
% TODO: Is an explanation of OSD needed in this chapter?
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Classical Error Correction}
\label{sec:Classical Error Correction}
The core concept underpinning error correcting codes is the
realization that introducing a finite amount of redundancy to
information before transmission can considerably reduce the error rate.
Specifically, Shannon proved in 1948 that for any channel, a block
code can be found that achieves arbitrarily small probability of
error at any communication rate up to the capacity of the channel
when the block length approaches infinity
\cite[Sec.~13]{shannon_mathematical_1948}.
In this section, we explore the concepts of ``classical'' (as in non-quantum)
error correction that are central to this work.
We start by looking at different ways of encoding information,
first considering binary linear block codes in general and then \ac{ldpc} and
\ac{sc}-\ac{ldpc} codes.
Finally, we pivot to the decoding process, specifically the \ac{bp}
algorithm.
\subsection{Binary Linear Block Codes}
%
% Codewords, n, k, rate
%
One particularly important class of coding schemes is that of binary
linear block codes.
The information to be protected takes the form of a sequence of
binary symbols, which is split into separate blocks.
Each block is encoded, transmitted, and decoded separately.
The encoding step introduces redundancy by mapping input messages
$\bm{u} \in \mathbb{F}_2^k$ of length $k \in \mathbb{N}$ (called the
\textit{information length}) onto \textit{codewords} $\bm{x} \in
\mathbb{F}_2^n$ of length $n \in \mathbb{N}$ (called the
\textit{block length}) with $n > k$.
A measure of the amount of introduced redundancy is the \textit{code
rate} $R = k/n$.
We call the set of all codewords $\mathcal{C}$ the \textit{code}
\cite[Sec.~3.1.1]{ryan_channel_2009}.
%
% d_min and the [] Notation
%
During the encoding process, a mapping from $\mathbb{F}_2^k$
onto $\mathcal{C} \subset \mathbb{F}_2^n$ takes place.
The input messages are mapped onto an expanded vector space, where
they are ``further apart'', giving rise to the error correcting
properties of the code.
This notion of the distance between two codewords $\bm{x}_1$ and
$\bm{x}_2$ can be expressed using the \textit{Hamming distance} $d(\bm{x}_1,
\bm{x}_2)$, which is defined as the number of positions in which they differ.
We define the \textit{minimum distance} of a code $\mathcal{C}$ as
%
\begin{align*}
d_\text{min} := \min \left\{ d(\bm{x}_1, \bm{x}_2) : \bm{x}_1,
\bm{x}_2 \in \mathcal{C}, \bm{x}_1 \neq \bm{x}_2 \right\}
.
\end{align*}
%
We can signify that a binary linear block code has information length
$k$, block length $n$ and minimum distance $d_\text{min}$ using the
notation $[n,k,d_\text{min}]$ \cite[Sec.~1.3]{macwilliams_theory_1977}.
%
% Parity checks, H, and the syndrome
%
A particularly elegant way of describing the subspace $\mathcal{C}$ of
$\mathbb{F}_2^n$ that the codewords make up is the notion of
\textit{parity checks}.
Since $\lvert \mathcal{C} \rvert = 2^k$ and $\lvert \mathbb{F}_2^n
\rvert = 2^n$, we could introduce $n-k$ conditions to constrain the
additional degrees of freedom.
These conditions, called parity checks, take the form of equations
over $\mathbb{F}_2^n$, linking the individual positions of each codeword.
We can arrange the coefficients of these equations in a
\textit{parity-check matrix} (\acs{pcm}) $\bm{H} \in
\mathbb{F}_2^{(n-k) \times n}$ and equivalently define the code as
\cite[Sec.~3.1.1]{ryan_channel_2009}
%
\begin{align*}
\mathcal{C} = \left\{ \bm{x} \in \mathbb{F}_2^n :
\bm{H}\bm{x}^\text{T} = \bm{0} \right\}
.%
\end{align*}
Note that in general we may have linearly dependent parity checks,
prompting us to define the \ac{pcm} as $\bm{H} \in
\mathbb{F}_2^{m\times n}$ with $m \ge n-k$ instead.
The \textit{syndrome} $\bm{s} = \bm{H} \bm{v}^\text{T}$ describes
which parity checks a candidate codeword $\bm{v} \in \mathbb{F}_2^n$ violates.
The representation using the \ac{pcm} has the benefit of providing a
description of the code whose memory complexity does not grow
exponentially with $n$, in contrast to keeping track of all codewords directly.
%
% The decoding problem
%
Figure \ref{fig:Diagram of a transmission system} visualizes the
communication process \cite[Sec.~1.1]{ryan_channel_2009}.
An input message $\bm{u}\in \mathbb{F}_2^k$ is mapped onto a codeword $\bm{x}
\in \mathbb{F}_2^n$. This is passed on to a modulator, which
interacts with the physical channel.
A demodulator processes the channel output and forwards the result
$\bm{y}$ to a decoder.
We differentiate between \textit{soft-decision} decoding, where
$\bm{y} \in \mathbb{R}^n$, and \textit{hard-decision} decoding, where
$\bm{y} \in \mathbb{F}_2^n$ \cite[Sec.~1.5.1.3]{ryan_channel_2009}.
Finally, the decoder is responsible for obtaining an estimate
$\hat{\bm{u}} \in \mathbb{F}_2^k$ of the original input message.
This is done by first finding an estimate $\hat{\bm{x}}$ of the sent
codeword and undoing the encoding.
The decoding problem that we generally attempt to solve thus consists
in finding the best estimate $\hat{\bm{x}}$ given $\bm{y}$.
\begin{figure}[t]
\centering
\tikzset{
box/.style={
rectangle, draw=black, minimum width=17mm, minimum height=8mm,
},
}
\begin{tikzpicture}
[
node distance = 2mm and 7mm,
]
\node (in) {};
\node[box, right=of in] (enc) {Encoder};
\node[box, minimum width=25mm, right=of enc] (mod) {Modulator};
\node[box, below right=of mod] (cha) {Channel};
\node[box, minimum width=25mm, below left=of cha] (dem) {Demodulator};
\node[box, left=of dem] (dec) {Decoder};
\node[left=of dec] (out) {};
\draw[-{latex}] (in) -- (enc) node[midway, above] {$\bm{u}$};
\draw[-{latex}] (enc) -- (mod) node[midway, above] {$\bm{x}$};
\draw[-{latex}] (mod) -| (cha);
\draw[-{latex}] (cha) |- (dem);
\draw[-{latex}] (dem) -- (dec) node[midway, above] {$\bm{y}$};
\draw[-{latex}] (dec) -- (out) node[midway, above] {$\hat{\bm{u}}$};
\end{tikzpicture}
\caption{Overview of a transmission system.}
\label{fig:Diagram of a transmission system}
\end{figure}
%
%
% Hard vs. soft information
%
\subsection{Low-Density Parity-Check Codes}
%
% Core concept
%
Shannon's noisy-channel coding theorem is stated for codes whose block
length approaches infinity. This suggests that as the block length
becomes larger, the performance of the considered codes should
generally improve.
However, the size of the \ac{pcm}, and thus in general the decoding complexity,
of a linear block code grows quadratically with $n$.
This would quickly render decoding intractable as we increase the block length.
We can get around this problem by constructing $\bm{H}$ in such a
manner that the number of nonzero entries grows less than quadratically, e.g.,
only linearly.
This is exactly the motivation behind \ac{ldpc} codes
\cite[Ch.~1]{gallager_low_1960}.
%
% Tanner Graph, VNs and CNs
%
\ac{ldpc} codes belong to a class sometimes referred to as ``modern codes''.
These differ from ``classical codes'' in their decoding algorithms:
Classical codes are usually decoded using one-step hard-decision decoding,
whereas modern codes are suitable for iterative soft-decision
decoding \cite[Preface]{ryan_channel_2009}. The iterative decoding algorithms
in question are generally defined in terms of message passing on the
\textit{Tanner graph} of the code. The Tanner graph is a bipartite
graph that constitutes an alternative representation of the \ac{pcm}.
We define two types of nodes: \acp{vn}, corresponding to codeword
bits, and \acp{cn}, corresponding to individual parity checks.
We then construct the Tanner graph by connecting each \ac{cn} to
the \acp{vn} that make up the corresponding parity check
\cite[Sec.~5.1.2]{ryan_channel_2009}.
Figure \ref{PCM and Tanner graph of the Hamming code} shows this
construction for the [7,4,3]-Hamming code.
%
\begin{figure}[t]
\centering
\begin{align*}
\bm{H} =
\begin{pmatrix}
0 & 1 & 1 & 1 & 1 & 0 & 0 \\
1 & 0 & 1 & 1 & 0 & 1 & 0 \\
1 & 1 & 0 & 1 & 0 & 0 & 1 \\
\end{pmatrix}
\end{align*}
\vspace*{2mm}
\tikzset{
VN/.style={
circle, fill=KITgreen, minimum width=1mm, minimum height=1mm,
},
CN/.style={
rectangle, fill=KITblue, minimum width=1mm, minimum height=1mm,
},
}
\begin{tikzpicture}
\node[VN, label=above:$x_1$] (vn1) {};
\node[VN, right=12mm of vn1, label=above:$x_2$] (vn2) {};
\node[VN, right=12mm of vn2, label=above:$x_3$] (vn3) {};
\node[VN, right=12mm of vn3, label=above:$x_4$] (vn4) {};
\node[VN, right=12mm of vn4, label=above:$x_5$] (vn5) {};
\node[VN, right=12mm of vn5, label=above:$x_6$] (vn6) {};
\node[VN, right=12mm of vn6, label=above:$x_7$] (vn7) {};
\node[
CN, below=25mm of vn4,
label={below:$x_1 + x_3 + x_4 + x_6 = 0$}
] (cn2) {};
\node[
CN, left=40mm of cn2,
label={below:$x_2 + x_3 + x_4 + x_5 = 0$}
] (cn1) {};
\node[
CN, right=40mm of cn2,
label={below:$x_1 + x_2 + x_4 + x_7 = 0$}
] (cn3) {};
\foreach \n in {2,3,4,5} {
\draw (cn1) -- (vn\n);
}
\foreach \n in {1,3,4,6} {
\draw (cn2) -- (vn\n);
}
\foreach \n in {1,2,4,7} {
\draw (cn3) -- (vn\n);
}
\end{tikzpicture}
\caption{The \ac{pcm} and corresponding Tanner graph of the
[7,4,3]-Hamming code.}
\label{PCM and Tanner graph of the Hamming code}
\end{figure}
%
% N_V(j), N_C(i)
%
Mathematically, we represent a \ac{vn} using the index $i \in
\mathcal{I} := \left[
1 : n \right]$ and a \ac{cn} using the index $j \in \mathcal{J}
:= \left[ 1 : m \right]$.
We can then encode the information contained in the graph by defining
the neighborhood of a variable node $i$ as
$\mathcal{N}_\text{V} (i) = \left\{ j \in \mathcal{J} : \bm{H}_{j,i}
= 1 \right\}$
and that of a check node $j$ as
$\mathcal{N}_\text{C} (j) = \left\{ i \in \mathcal{I} : \bm{H}_{j,i}
= 1 \right\}$.
%
% Error floor and waterfall regions
%
We typically evaluate the performance of \ac{ldpc} codes using the
\ac{ber} or the \ac{fer} (a \textit{frame} refers to one whole
transmitted block in this context).
Considering an \ac{awgn} channel, \autoref{fig:ldpc-perf} shows a
qualitative performance characteristic of an \ac{ldpc} code
\cite[Fig.~1]{costello_spatially_2014}. We talk of the
\textit{waterfall} and the \textit{error floor} regions.
\begin{figure}[t]
\centering
\begin{tikzpicture}
\begin{axis}[
width=12cm,
height=9cm,
xlabel={Signal-to-noise ratio},
ylabel={Error rate},
% xmin=0, xmax=6,
enlarge x limits=false,
ymin=1e-9, ymax=1,
ticks=none,
% y tick label={},
ymode=log,
grid=both,
grid style={line width=0.2pt, draw=gray!30},
major grid style={line width=0.4pt, draw=gray!50},
legend pos=north east,
legend cell align={left},
]
\addplot+[mark=none, solid, smooth, KITblue] coordinates {
(4.5789E-01, 1.1821E-01)
(6.6842E-01, 9.4575E-02)
(8.6316E-01, 5.2657E-02)
(1.0421E+00, 2.2183E-02)
(1.1789E+00, 8.3588E-03)
(1.3368E+00, 1.4835E-03)
(1.4895E+00, 1.6852E-04)
(1.5842E+00, 2.8285E-05)
(1.6737E+00, 4.2465E-06)
(1.7684E+00, 3.4519E-07)
(1.8316E+00, 3.9213E-08)
(1.8684E+00, 6.2247E-09)
(1.9053E+00, 1E-09)
};
\addlegendentry{Regular}
\addplot+[mark=none, solid, smooth, KITorange] coordinates {
(4.5789E-01, 1.1821E-01)
(6.4211E-01, 4.9800E-02)
(7.5263E-01, 1.2700E-02)
(8.1579E-01, 2.3177E-03)
(8.6842E-01, 3.5779E-04)
(9.1053E-01, 5.3716E-05)
(9.4737E-01, 4.8818E-06)
(9.8947E-01, 6.5555E-07)
(1.0421E+00, 9.5713E-08)
% (1.0684E+00, 2.9670E-08)
(1.1474E+00, 1.2499E-08)
(1.3000E+00, 7.1560E-09)
(1.4579E+00, 6.0535E-09)
% (1.6105E+00, 5E-09)
(1.9579E+00, 4E-09)
(2.2947E+00, 3.1876E-09)
% (2.8842E+00, 2.0403E-09)
};
\addlegendentry{Irregular}
\draw[gray, densely dashed]
(axis cs:0.65, 2e-3) rectangle (axis cs:1.65, 5e-5);
\node[below] at (axis cs:1.15, 6e-5) {Waterfall};
\draw[gray, densely dashed]
(axis cs:1, 6e-8) rectangle (axis cs:2, 2e-9);
\node[above] at (axis cs:1.5, 7e-8) {Error floor};
\end{axis}
\end{tikzpicture}
\caption{
Qualitative performance characteristic of an \ac{ldpc} code
in an \ac{awgn} channel. Adapted from
\cite[Fig.~1]{costello_spatially_2014}.
}
\label{fig:ldpc-perf}
\end{figure}
Broadly, there are two kinds of \ac{ldpc} codes, \textit{regular} and
\textit{irregular}.
Regular codes are characterized by the fact that the weights, i.e.,
the numbers of ones, of their rows and columns are constant
\cite[Sec.~5.1.1]{ryan_channel_2009}.
Already during their introduction, regular \ac{ldpc} codes were shown to have
a minimum distance scaling linearly with the block length $n$ for
large values of $n$ \cite[Ch.~2,~Theorem~1]{gallager_low_1960},
which leads to them not exhibiting an error floor under \ac{ml} decoding.
Irregular codes, on the other hand, generally do exhibit an error floor,
their redeeming quality being the ability to reach near-capacity
performance in the waterfall region \cite[Intro.]{costello_spatially_2014}.
\subsection{Spatially-Coupled LDPC Codes}
A relatively recent development in the world of \ac{ldpc} codes is
that of \ac{sc}-\ac{ldpc} codes.
Their key feature is that they combine the best properties of regular
and irregular codes.
They have a minimum distance that grows linearly with $n$, promising
good error floor behavior, and capacity approaching
iterative decoding behavior, promising good performance in the
waterfall region \cite[Intro.]{costello_spatially_2014}.
The essential property of \ac{sc}-\ac{ldpc} codes is that codewords
from different \textit{spatial positions}, that would ordinarily be sent
one after the other independently, are coupled.
This is achieved by connecting some \acp{vn} of one spatial position to
\acp{cn} of another, resulting in a \ac{pcm} of the form
\cite[Eq.~1]{hassan_fully_2016}
%
\begin{align*}
\bm{H} =
\begin{pmatrix}
\bm{H}_0(1) & & \\
\vdots & \ddots & \\
\bm{H}_K(1) & & \bm{H}_0(L) \\
& \ddots & \\
& & \bm{H}_K(L) \\
\end{pmatrix}
,
\end{align*}
%
where $K \in \mathbb{N}$ is the \textit{coupling width} and $L \in
\mathbb{N}$ is the number of spatial positions.
This construction results in a Tanner graph as depicted in
\autoref{fig:sc-ldpc-tanner}.
\begin{figure}[t]
\centering
\tikzset{
VN/.style={
circle, fill=KITgreen, minimum width=1mm, minimum height=1mm,
},
CN/.style={
rectangle, fill=KITblue, minimum width=1mm, minimum height=1mm,
},
}
\begin{tikzpicture}[node distance=7mm and 1cm]
\node[VN] (vn00) {};
\node[VN, below = of vn00] (vn01) {};
\node[VN, below = of vn01] (vn02) {};
\node[VN, below = of vn02] (vn03) {};
\node[VN, below = of vn03] (vn04) {};
\coordinate (temp) at ($(vn01)!0.5!(vn02)$);
\node[CN, right = of temp] (cn00) {};
\node[CN, below = of cn00] (cn01) {};
\draw (vn00) -- (cn00);
\draw (vn01) -- (cn00);
\draw (vn03) -- (cn00);
\draw (vn01) -- (cn01);
\draw (vn02) -- (cn01);
\draw (vn04) -- (cn01);
\foreach \i in {1,2,3} {
\pgfmathtruncatemacro{\previ}{\i-1}
\node[VN, right = 25mm of vn\previ 0] (vn\i0) {};
\foreach \j in {1,...,4} {
\pgfmathtruncatemacro{\prevj}{\j-1}
\node[VN, below = of vn\i\prevj] (vn\i\j) {};
}
\coordinate (temp) at ($(vn\i1)!0.5!(vn\i2)$);
\node[CN, right = of temp] (cn\i0) {};
\node[CN, below = of cn\i0] (cn\i1) {};
\draw (vn\i0) -- (cn\i0);
\draw (vn\i1) -- (cn\i0);
\draw (vn\i3) -- (cn\i0);
\draw (vn\i1) -- (cn\i1);
\draw (vn\i2) -- (cn\i1);
\draw (vn\i4) -- (cn\i1);
}
\node[right = 25mm of vn30] (vn40) {};
\node[below = of vn40] (vn41) {};
\node[below = of vn41] (vn42) {};
\node[below = of vn42] (vn43) {};
\node[below = of vn43] (vn44) {};
\coordinate (temp) at ($(vn41)!0.5!(vn42)$);
\node[right = of temp] (cn40) {};
\node[below = of cn40] (cn41) {};
\foreach \i in {0,1,2} {
\pgfmathtruncatemacro{\next}{\i+1}
\pgfmathtruncatemacro{\nextnext}{\i+2}
\draw (vn\i 3) to[bend right] (cn\next 1);
\draw (vn\i 1) to[bend left] (cn\nextnext 0);
}
\draw (vn33) to[bend right] (cn41);
\node at ($(cn40)!0.5!(cn41)$) {\dots};
\draw[decorate, decoration={brace, amplitude=10pt}]
([xshift=-5mm,yshift=2mm]vn00.north) --
([xshift=5mm,yshift=2mm]vn00.north -| cn20.north)
node[midway, above=4mm] {$K$};
\end{tikzpicture}
\caption{
Visualization of the coupling between the Tanner graphs
of individual spatial positions.
}
\label{fig:sc-ldpc-tanner}
\end{figure}
Note that at the first and last few spatial positions, some \acp{cn}
have lower degrees.
This leads to more reliable information about the
\acp{vn} that, as we will see, is
later passed to subsequent spatial positions during decoding.
This is precisely the effect that leads to the good performance of
\ac{sc}-\ac{ldpc} codes in the waterfall region \cite{costello_spatially_2014}.
\subsection{Iterative Decoding}
% Introduction
\ac{ldpc} codes are generally decoded using efficient iterative
algorithms, something that is possible due to their sparsity
\cite[Sec.~5.3]{ryan_channel_2009}.
The algorithm originally proposed alongside \ac{ldpc} codes for this
purpose by Gallager in 1960 is now known as the \ac{spa}
\cite[Sec.~5.4.1]{ryan_channel_2009}, also called \ac{bp}.
The optimality criterion the \ac{spa} is built around is a
symbol-wise \ac{map} decision \cite[Sec.~5.4.1]{ryan_channel_2009}.
The core idea of the resulting algorithm is to view \acp{cn} as
representing single-parity check codes and \acp{vn} as representing
repetition codes.
The algorithm alternates between consolidating soft information about
the \acp{vn} in the \acp{cn}, and consolidating soft information about
the \acp{cn} in the \acp{vn}.
To this end, messages are passed back and forth along the edges of
the Tanner graph.
$L_{i\rightarrow j}$ represents a message passed from \ac{vn} $i$ to
\ac{cn} $j$, while $L_{i\leftarrow j}$ represents a message passed from
\ac{cn} $j$ to \ac{vn} $i$.
The \acp{vn} additionally receive messages \cite[Sec.~5.4.2]{ryan_channel_2009}
\begin{align*}
\tilde{L}_i = \log \frac{P(X_i=0 \vert Y_i=y_i)}{P(X_i=1 \vert Y_i=y_i)},
\end{align*}
computed from the channel outputs.
The consolidation of the information occurs in the \ac{vn} update
\begin{align*}
L_{i\rightarrow j} = \tilde{L}_i + \sum_{j'\in \mathcal{N}_\text{V}(i)\setminus
j} L_{i\leftarrow j'}
\end{align*}
and the \ac{cn} update
\begin{align*}
L_{i\leftarrow j} = 2\cdot \tanh^{-1} \left( \prod_{i'\in
\mathcal{N}_\text{C}(j)\setminus i} \tanh \frac{L_{i'\rightarrow j}}{2} \right)
.
\end{align*}
A basic assumption for the derivation of the \ac{spa} is that the
messages are statistically independent.
If the Tanner graph has cycles, however, this
condition is not met.
The shorter the cycles, the sooner this condition is violated and the
worse the approximation becomes \cite[Sec.~5.4.4]{ryan_channel_2009}.
Cycles of length four (so-called \emph{$4$-cycles}) are the shortest
possible cycles and are thus especially problematic.
% Min-sum algorithm
A simplification of the \ac{spa} is the min-sum decoder. Here, the
\ac{cn} update is approximated as \cite[Sec.~5.5.1]{ryan_channel_2009}
\begin{align*}
L_{i \leftarrow j} = \prod_{i' \in \mathcal{N}_\text{C}(j)\setminus i}
\sign \left( L_{i' \rightarrow j} \right)
\cdot \min_{i' \in \mathcal{N}_\text{C}(j)\setminus i} \lvert
L_{i'\rightarrow j} \rvert
.
\end{align*}
% Sliding-window decoding
For \ac{sc}-\ac{ldpc} codes, the iterative decoding process is wrapped by a
windowing step. This is done to reduce the latency and memory requirements and
also the overall computational complexity \cite{costello_spatially_2014}.
To this end, the Tanner graph is split into several overlapping windows.
During decoding, the messages that are passed along the edges of the
graph in the overlapping regions are kept in memory and used for the
decoding of subsequent blocks \cite[Sec.~III.~C.]{hassan_fully_2016}.
\section{Quantum Mechanics and Quantum Information Science}
\label{sec:Quantum Mechanics and Quantum Information Science}
% TODO: Should the brief intro to QC be made later on or here?
%%%%%%%%%%%%%%%%
\subsection{Core Concepts and Notation}
\label{subsec:Notation}
\ldots can be very elegantly expressed using the language of
linear algebra.
\todo{Mention that we model the state of a quantum mechanical system
as a vector}
The so called Bra-ket or Dirac notation is especially appropriate,
having been proposed by Paul Dirac in 1939 for the express purpose
of simplifying quantum mechanical notation \cite{dirac_new_1939}.
Two new symbols are defined, \emph{bra}s $\bra{\cdot}$ and
\emph{ket}s $\ket{\cdot}$.
Kets denote ordinary vectors, while bras denote their Hermitian conjugates.
For example, two vectors specified by the labels $a$ and $b$,
respectively, are written as $\ket{a}$ and $\ket{b}$.
Their inner product is written as $\braket{a\vert b}$.
\red{\textbf{Tensor product}}
\red{\ldots
\todo{Introduce determinate state or use a different word?}
Take for example two systems with the determinate states $\ket{0}$
and $\ket{1}$. In general, the state of each can be written as the
superposition%
%
\begin{align*}
\alpha \ket{0} + \beta \ket{1}
.%
\end{align*}
%
Combining these two systems into one, the overall state becomes%
%
\begin{align*}
&\mleft( \alpha_1 \ket{0} + \beta_1 \ket{1} \mright) \otimes
\mleft( \alpha_2 \ket{0} + \beta_2 \ket{1} \mright) \\
= &\alpha_1 \alpha_2 \ket{0} \ket{0}
+ \alpha_1 \beta_2 \ket{0} \ket{1}
+ \beta_1 \alpha_2 \ket{1} \ket{0}
+ \beta_1 \beta_2 \ket{1} \ket{1}
% =: &\alpha_{00} \ket{00}
% + \alpha_{01} \ket{01}
% + \alpha_{10} \ket{10}
% + \alpha_{11} \ket{11}
.%
\end{align*}%
%
\ldots When not ambiguous in the context, the tensor product
symbol may be omitted, e.g.,
\begin{align*}
\ket{0} \otimes \ket{0} = \ket{0}\ket{0}
.%
\end{align*}
}
As we will see, the core concept that gives quantum computing its
power is entanglement. When two quantum mechanical systems are
entangled, measuring the state of one will collapse that of the other.
Take for example two subsystems with the overall state
%
\begin{align*}
\ket{\psi} = \frac{1}{\sqrt{2}} \mleft( \ket{0}\ket{0} +
\ket{1}\ket{1} \mright)
.%
\end{align*}
%
If we measure the first subsystem as being in $\ket{0}$, we can
be certain that a measurement of the second subsystem will also yield $\ket{0}$.
Introducing a new notation for entangled states, we can write%
%
\begin{align*}
\ket{\psi} = \frac{1}{\sqrt{2}} \left( \ket{00} + \ket{11} \right)
.%
\end{align*}
%
\subsection{Projective Measurements}
\label{subsec:Projective Measurements}
% TODO: Write
%%%%%%%%%%%%%%%%
\subsection{Quantum Gates}
\label{subsec:Quantum Gates}
\red{
\textbf{Content:}
\begin{itemize}
\item Bra-ket notation
\item The tensor product
\item Projective measurements (the related operators,
eigenvalues/eigenspaces, etc.)
\begin{itemize}
\item First explain what an operator is
\end{itemize}
\item Abstract intro to QC: Use gates to process qubit
states, similar to classical case
\item X, Z, Y operators/gates
\item Hadamard gate (+ X and Z are the same thing in different bases)
\item Notation of operators on multi-qubit states
\item The Pauli, Clifford and Magic groups
\end{itemize}
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Quantum Error Correction}
\label{sec:Quantum Error Correction}
\red{
\textbf{Content:}
\begin{itemize}
\item General context
\begin{itemize}
\item Why we want QC
\item Why we need QEC (correcting errors due to noisy gates)
\item Main challenges of QEC compared to classical
error correction
\end{itemize}
\item Stabilizer codes
\begin{itemize}
\item Definition of a stabilizer code
\item The stabilizer and its generators (note somewhere
that the generators have to commute to be able to
be measured without disturbing each other)
\item syndrome extraction circuit
\item Stabilizer codes are effectively the QM
% TODO: Actually binary linear codes or just linear codes?
equivalent of binary linear codes (e.g.,
expressible via check matrix)
\end{itemize}
\item Digitization of errors
\item CSS codes
\item Color codes?
\item Surface codes?
\item Fault tolerant error correction (gates with which we do
error correction are also noisy)
\begin{itemize}
\item Transversal operations
\item \dots
\end{itemize}
\item Circuit level noise
\item Detector error model
\begin{itemize}
\item Columns of the check matrix represent different
possible error patterns $\rightarrow$ Check matrix
doesn't quite correspond to the codewords we used
initially anymore, but some similar structure is
still there (compare with syndrome)
\end{itemize}
\end{itemize}
\textbf{General Notes:}
\begin{itemize}
\item Give a brief overview of the history of QEC
\item Note (and research if this is actually correct) that QC
was developed on an abstract level before thinking of
what hardware to use
\item Note that there are other codes than stabilizer codes
(and research and give some examples), but only
stabilizer codes are considered in this work
\item Degeneracy
\item The QEC decoding problem (considering degeneracy)
\end{itemize}
}
\subsection{Stabilizer Codes}
\subsection{CSS Codes}
\subsection{Quantum Low-Density Parity-Check Codes}

View File

@@ -0,0 +1,13 @@
\chapter{Fault Tolerant QEC}
\section{Fault Tolerance}
\section{Noise Models}
\subsection{Depolarizing Channel}
\subsection{Phenomenological Noise}
\subsection{Circuit-Level Noise}
\section{Detector Error Models}
\subsection{Measurement Syndrome Matrix}
\subsection{Detector Error Matrix}
\subsection{Detector Error Models}
\section{Practical Considerations}
\subsection{Practical Methodology}
\subsection{Stim}

View File

@@ -0,0 +1,5 @@
\chapter{Decoding under Detector Error Models}
\section{Sliding-Window Decoding}
\section{Treating Detector Error Matrices like SC-LDPC Codes}
\section{Soft-Information Aware Sliding-Window Decoding}
\section{Numerical Results and Analysis}

View File

@@ -0,0 +1 @@
\chapter{Conclusion and Outlook}

107
src/thesis/main.tex Normal file
View File

@@ -0,0 +1,107 @@
\documentclass{lib/cel-thesis/cel-thesis}
\usepackage[a4paper,left=3cm,right=3cm,top=2.5cm,bottom=2.5cm]{geometry}
\usepackage{float}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{mleftright}
\usepackage{bm}
\usepackage{tikz}
\usepackage{xcolor}
\usepackage{pgfplots}
\pgfplotsset{compat=newest}
\usepackage{acro}
\usepackage{braket}
% \usepackage[
% backend=biber,
% style=ieee,
% sorting=nty,
% ]{biblatex}
\usepackage{todonotes}
\usetikzlibrary{calc, positioning, arrows, fit}
\usetikzlibrary{external}
\tikzexternalize
\makeatletter
\renewcommand{\todo}[2][]{\tikzexternaldisable\@todo[#1]{#2}\tikzexternalenable}
\makeatother
%
%
% Custom commands
%
%
\newcommand{\red}[1]{\textcolor{red}{#1}}
\newcommand{\figwidth}{10cm}
\newcommand{\figheight}{7.5cm}
%
%
% Acronyms
%
%
\input{acronyms.tex}
\usepackage{babelbib}
\setlanguage
\usepackage{caption}
\usepackage{subcaption}
% NOTE: \usepackage{bm} and \usepackage{todonotes} are already loaded above;
% duplicate loads removed.
\DeclareCaptionLabelFormat{bf-nodot}{\textbf{#1}~\textbf{#2}}
\captionsetup{labelformat=bf-nodot,labelsep=colon}
%
%
% Content
%
%
\thesisTitle{Fault Tolerant Quantum Error Correction}
\thesisType{Master's Thesis}
\thesisAuthor{Andreas Tsouchlos}
\thesisAdvisor{Prof. Dr.-Ing. Laurent Schmalen}
\thesisHeadOfInstitute{Prof. Dr.-Ing. Laurent Schmalen}
% \thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost}
%\thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost\\Prof. Dr.-Ing.
% Laurent Schmalen}
\thesisSupervisor{Jonathan Mandelbaum}
\thesisStartDate{01.11.2025}
\thesisEndDate{04.05.2026}
\thesisSignatureDate{Signature date}
\thesisLanguage{english}
\begin{document}
\pagenumbering{roman} % all the preliminaries should be counted roman style
\maketitle
\newpage
% \include{chapters/abstract}
\cleardoublepage
\pagenumbering{arabic}
\tableofcontents
\cleardoublepage
\input{chapters/1_introduction.tex}
\input{chapters/2_fundamentals.tex}
\input{chapters/3_fault_tolerant_qec.tex}
\input{chapters/4_decoding_under_dems.tex}
\input{chapters/5_conclusion_and_outlook.tex}
% \appendix
% \listoffigures
% \listoftables
% \include{abbreviations}
\bibliography{lib/cel-thesis/IEEEabrv,src/thesis/bibliography}
\end{document}