41 Commits

Author SHA1 Message Date
4dfb3a7c35 Last changes 2026-05-06 01:58:49 +02:00
10d791fe04 Final readthrough corrections of quantum fundamentals 2026-05-04 23:04:28 +02:00
06852b8e62 Final readthrough corrections of classical fundamentals 2026-05-04 21:07:25 +02:00
400dc47df0 Incorporate Jonathan's corrections to classical fundamentals 2026-05-04 20:56:35 +02:00
ece8fc1715 Center error marker 2026-05-04 20:24:27 +02:00
56e3a0e5ca Consistently capitalize character after semicolon 2026-05-04 20:21:21 +02:00
8d6df8a79d Final readthrough corrections for fault tolerance chapter 2026-05-04 20:06:18 +02:00
c41ac9f61f Incorporate Jonathan's corrections to Fault Tolerance Chapter 2026-05-04 19:45:15 +02:00
a41e0b05fe Add Lia as supervisor 2026-05-04 19:20:08 +02:00
1edc3f301a Final readthrough corrections for decoding chapter 2026-05-04 18:42:39 +02:00
a977860ddb Incorporate Jonathan's correction to sliding-window decoding sections 2026-05-04 17:35:33 +02:00
7bf1b2f8d7 Incorporate Jonathan's corrections to numerical results section 2026-05-04 17:07:41 +02:00
72acea0321 Incorporate Jonathan's corrections to the introduction 2026-05-04 16:31:31 +02:00
f1a5aaf3f8 Make ToC be on one page 2026-05-04 16:20:37 +02:00
23828b671a Minor changes to conclusion 2026-05-04 16:08:56 +02:00
09893d527e Incorporate Jonathan's corrections to Abstract 2026-05-04 15:36:15 +02:00
25789a6bd3 Incorporate Jonathans's corrections to Conclusion 2026-05-04 15:28:11 +02:00
001ca614bb Fix bibliography 2026-05-04 15:17:05 +02:00
9e5eaaf985 Incorporate Lia's corrections to fault tolerance 2026-05-04 14:59:49 +02:00
17191382cf Incorporate Lia's corrections to QM and QEC fundamentals 2026-05-04 13:01:54 +02:00
aa907ef4a3 Incorporate Lia's corrections to classical fundamentals 2026-05-04 12:12:10 +02:00
12036caa91 Fix bibliography titlecase (in clean_bibliography.sh) and a few things in the bibliography itself 2026-05-04 10:53:50 +02:00
4c206ae9c4 Rephrase first sentence of abstract 2026-05-04 10:34:53 +02:00
01a754e5da Reset acronyms after abstract 2026-05-04 10:31:54 +02:00
81292a2644 Add abstract 2026-05-04 10:28:46 +02:00
73958d7850 Check out version of cel thesis template with option for signature 2026-05-04 02:08:32 +02:00
18e3683502 Add signature 2026-05-04 02:06:58 +02:00
1eb4db289e Make main.tex work with signature modification 2026-05-04 02:06:39 +02:00
f56cd05890 Fix bibtex definition for arxiv papers 2026-05-04 01:45:46 +02:00
e9d996155d Write conclusion 2026-05-04 01:21:26 +02:00
5e26179154 Finish intro 2026-05-03 20:51:32 +02:00
9ae98e07d7 Write most of Introduction; Fix citing Intro. 2026-05-03 19:09:29 +02:00
728c8560c7 Fix N_C/N_V notation 2026-05-03 14:07:04 +02:00
dd30b4fc0d Write captions 2026-05-03 04:26:58 +02:00
6e53ed5d1b Complete results chapter text 2026-05-03 04:00:05 +02:00
0016df0004 Add text for second BPGD plot 2026-05-03 03:10:21 +02:00
9ca2698d38 Add text for first BPGD figure 2026-05-03 02:16:22 +02:00
72461fe555 Complete first draft of warm-start sliding-window decoding section 2026-05-03 01:11:22 +02:00
5fabe2e146 Finish first draft of BP warm start subsection 2026-05-02 23:40:29 +02:00
a90458dd8a Write conclusion to BP investigation. BP investigation now done 2026-05-02 19:16:26 +02:00
d2960b8f0e Write text for figure 4.10 2026-05-02 17:59:44 +02:00
11 changed files with 1945 additions and 784 deletions

View File

@@ -7,19 +7,20 @@
language = {en}, language = {en},
number = {3}, number = {3},
journal = {Mathematical Proceedings of the Cambridge Philosophical Society}, journal = {Mathematical Proceedings of the Cambridge Philosophical Society},
author = {Dirac, P. a. M.}, author = {Dirac, P. A. M.},
month = jul, month = jul,
year = {1939}, year = {1939},
pages = {416--418}, pages = {416--418},
} }
@article{huang_improved_2023, @misc{huang_improved_2023,
title = {Improved {Noisy} {Syndrome} {Decoding} of {Quantum} {LDPC} {Codes} with {Sliding} {Window}}, title = {Improved Noisy Syndrome Decoding of Quantum {LDPC} Codes with Sliding Window},
doi = {10.48550/arXiv.2311.03307}, doi = {10.48550/arXiv.2311.03307},
publisher = {arXiv}, publisher = {arXiv},
author = {Huang, Shilin and Puri, Shruti}, author = {Huang, Shilin and Puri, Shruti},
month = nov, month = nov,
year = {2023}, year = {2023},
howpublished = {arXiv:2311.03307},
} }
@article{huang_increasing_2024, @article{huang_increasing_2024,
@@ -35,17 +36,18 @@
pages = {012453}, pages = {012453},
} }
@article{xu_batched_2025, @misc{xu_batched_2025,
title = {Batched high-rate logical operations for quantum {LDPC} codes}, title = {Batched high-rate logical operations for quantum {LDPC} codes},
doi = {10.48550/arXiv.2510.06159}, doi = {10.48550/arXiv.2510.06159},
publisher = {arXiv}, publisher = {arXiv},
author = {Xu, Qian and Zhou, Hengyun and Bluvstein, Dolev and Cain, Madelyn and Kalinowski, Marcin and Preskill, John and Lukin, Mikhail D. and Maskara, Nishad}, author = {Xu, Qian and Zhou, Hengyun and Bluvstein, Dolev and Cain, Madelyn and Kalinowski, Marcin and Preskill, John and Lukin, Mikhail D. and Maskara, Nishad},
month = oct, month = oct,
year = {2025}, year = {2025},
howpublished = {arXiv:2510.06159},
} }
@article{gidney_stability_2022, @article{gidney_stability_2022,
title = {Stability {Experiments}: {The} {Overlooked} {Dual} of {Memory} {Experiments}}, title = {Stability Experiments: The Overlooked Dual of Memory Experiments},
volume = {6}, volume = {6},
issn = {2521-327X}, issn = {2521-327X},
shorttitle = {Stability {Experiments}}, shorttitle = {Stability {Experiments}},
@@ -57,30 +59,33 @@
pages = {786}, pages = {786},
} }
@article{koutsioumpas_colour_2025, @misc{koutsioumpas_colour_2025,
title = {Colour {Codes} {Reach} {Surface} {Code} {Performance} using {Vibe} {Decoding}}, title = {Colour Codes Reach Surface Code Performance using Vibe Decoding},
doi = {10.48550/arXiv.2508.15743}, doi = {10.48550/arXiv.2508.15743},
publisher = {arXiv}, publisher = {arXiv},
author = {Koutsioumpas, Stergios and Noszko, Tamas and Sayginel, Hasan and Webster, Mark and Roffe, Joschka}, author = {Koutsioumpas, Stergios and Noszko, Tamas and Sayginel, Hasan and Webster, Mark and Roffe, Joschka},
month = aug, month = aug,
year = {2025}, year = {2025},
howpublished = {arXiv:2508.15743},
} }
@article{koutsioumpas_automorphism_2025, @misc{koutsioumpas_automorphism_2025,
title = {Automorphism {Ensemble} {Decoding} of {Quantum} {LDPC} {Codes}}, title = {Automorphism Ensemble Decoding of Quantum {LDPC} Codes},
language = {en}, language = {en},
author = {Koutsioumpas, Stergios and Sayginel, Hasan and Webster, Mark and Browne, Dan E}, author = {Koutsioumpas, Stergios and Sayginel, Hasan and Webster, Mark and Browne, Dan E},
month = mar, month = mar,
year = {2025}, year = {2025},
howpublished = {arXiv:2503.01738},
} }
@article{gottesman_heisenberg_1998, @misc{gottesman_heisenberg_1998,
title = {The {Heisenberg} {Representation} of {Quantum} {Computers}}, title = {The Heisenberg Representation of Quantum Computers},
doi = {10.48550/arXiv.quant-ph/9807006}, doi = {10.48550/arXiv.quant-ph/9807006},
publisher = {arXiv}, publisher = {arXiv},
author = {Gottesman, Daniel}, author = {Gottesman, Daniel},
month = jul, month = jul,
year = {1998}, year = {1998},
howpublished = {arXiv:quant-ph/9807006},
} }
@article{gidney_stim_2021, @article{gidney_stim_2021,
@@ -98,8 +103,8 @@
} }
@phdthesis{higgott_practical_2024, @phdthesis{higgott_practical_2024,
type = {Doctoral}, type = {Ph.D. {Thesis}},
title = {Practical and {Efficient} {Quantum} {Error} {Correction}}, title = {Practical and Efficient Quantum Error Correction},
copyright = {open}, copyright = {open},
language = {eng}, language = {eng},
school = {UCL (University College London)}, school = {UCL (University College London)},
@@ -117,16 +122,17 @@
} }
@misc{gong_toward_2024, @misc{gong_toward_2024,
title = {Toward {Low}-latency {Iterative} {Decoding} of {QLDPC} {Codes} {Under} {Circuit}-{Level} {Noise}}, title = {Toward Low-latency Iterative Decoding of {QLDPC} Codes Under Circuit-Level Noise},
language = {en}, language = {en},
journal = {arXiv.org}, journal = {arXiv.org},
author = {Gong, Anqi and Cammerer, Sebastian and Renes, Joseph M.}, author = {Gong, Anqi and Cammerer, Sebastian and Renes, Joseph M.},
month = mar, month = mar,
howpublished = {arXiv:2403.18901},
year = {2024}, year = {2024},
} }
@article{miao_quaternary_2025, @article{miao_quaternary_2025,
title = {Quaternary {Neural} {Belief} {Propagation} {Decoding} of {Quantum} {LDPC} {Codes} with {Overcomplete} {Check} {Matrices}}, title = {Quaternary Neural Belief Propagation Decoding of Quantum {LDPC} Codes with Overcomplete Check Matrices},
volume = {13}, volume = {13},
issn = {2169-3536}, issn = {2169-3536},
doi = {10.1109/ACCESS.2025.3539475}, doi = {10.1109/ACCESS.2025.3539475},
@@ -138,7 +144,7 @@
} }
@misc{tsouchlos_ccam_2024, @misc{tsouchlos_ccam_2024,
title = {{CCAM} {Summary}}, title = {{CCAM} Summary},
author = {Tsouchlos, Andreas}, author = {Tsouchlos, Andreas},
month = oct, month = oct,
year = {2024}, year = {2024},
@@ -158,7 +164,7 @@
} }
@book{griffiths_introduction_1995, @book{griffiths_introduction_1995,
title = {Introduction to {Quantum} {Mechanics}}, title = {Introduction to Quantum Mechanics},
isbn = {0-13-124405-1}, isbn = {0-13-124405-1},
language = {en}, language = {en},
publisher = {Prentice Hall}, publisher = {Prentice Hall},
@@ -167,7 +173,7 @@
} }
@misc{bradley_tensor_2018, @misc{bradley_tensor_2018,
title = {The {Tensor} {Product}, {Demystified}}, title = {The Tensor Product, Demystified},
author = {Bradley, Tai-Danae}, author = {Bradley, Tai-Danae},
month = nov, month = nov,
year = {2018}, year = {2018},
@@ -175,7 +181,7 @@
@book{nielsen_quantum_2010, @book{nielsen_quantum_2010,
address = {Cambridge}, address = {Cambridge},
title = {Quantum {Computation} and {Quantum} {Information}: 10th {Anniversary} {Edition}}, title = {Quantum Computation and Quantum Information: 10th Anniversary Edition},
isbn = {978-0-511-97666-7}, isbn = {978-0-511-97666-7},
shorttitle = {Quantum {Computation} and {Quantum} {Information}}, shorttitle = {Quantum {Computation} and {Quantum} {Information}},
doi = {10.1017/CBO9780511976667}, doi = {10.1017/CBO9780511976667},
@@ -187,7 +193,7 @@
} }
@article{geiselhart_automorphism_2021, @article{geiselhart_automorphism_2021,
title = {Automorphism {Ensemble} {Decoding} of {Reed}{Muller} {Codes}}, title = {Automorphism Ensemble Decoding of Reed--Muller Codes},
volume = {69}, volume = {69},
issn = {1558-0857}, issn = {1558-0857},
doi = {10.1109/TCOMM.2021.3098798}, doi = {10.1109/TCOMM.2021.3098798},
@@ -199,19 +205,20 @@
pages = {6424--6438}, pages = {6424--6438},
} }
@article{derks_designing_2025, @misc{derks_designing_2025,
title = {Designing fault-tolerant circuits using detector error models}, title = {Designing fault-tolerant circuits using detector error models},
doi = {10.48550/arXiv.2407.13826}, doi = {10.48550/arXiv.2407.13826},
publisher = {arXiv}, publisher = {arXiv},
author = {Derks, Peter-Jan H. S. and Townsend-Teague, Alex and Burchards, Ansgar G. and Eisert, Jens}, author = {Derks, Peter-Jan H. S. and Townsend-Teague, Alex and Burchards, Ansgar G. and Eisert, Jens},
month = oct, month = oct,
year = {2025}, year = {2025},
howpublished = {arXiv:2407.13826},
} }
@phdthesis{klinke_neural_2025, @phdthesis{klinke_neural_2025,
address = {Karlsruhe}, address = {Karlsruhe},
type = {Bachelor's {Thesis}}, type = {Bachelor's {Thesis}},
title = {Neural {Belief} {Propagation} {Ensemble} {Decoding} of {Quantum} {LDPC} {Codes}}, title = {Neural Belief Propagation Ensemble Decoding of Quantum {LDPC} Codes},
language = {English}, language = {English},
school = {KIT}, school = {KIT},
author = {Klinke, Jeremi}, author = {Klinke, Jeremi},
@@ -219,14 +226,15 @@
year = {2025}, year = {2025},
} }
@article{camps-moreno_toward_2024, @misc{camps-moreno_toward_2024,
title = {Toward {Quantum} {CSS}-{T} {Codes} from {Sparse} {Matrices}}, title = {Toward Quantum {CSS}-{T} Codes from Sparse Matrices},
doi = {10.48550/arXiv.2406.00425}, doi = {10.48550/arXiv.2406.00425},
abstract = {CSS-T codes were recently introduced as quantum error-correcting codes that respect a transversal gate. A CSS-T code depends on a pair \$(C\_1, C\_2)\$ of binary linear codes \$C\_1\$ and \$C\_2\$ that satisfy certain conditions. We prove that \$C\_1\$ and \$C\_2\$ form a CSS-T pair if and only if \$C\_2 {\textbackslash}subset {\textbackslash}operatorname\{Hull\}(C\_1) {\textbackslash}cap {\textbackslash}operatorname\{Hull\}(C\_1{\textasciicircum}2)\$, where the hull of a code is the intersection of the code with its dual. We show that if \$(C\_1,C\_2)\$ is a CSS-T pair, and the code \$C\_2\$ is degenerated on \${\textbackslash}\{i{\textbackslash}\}\$, meaning that the \$i{\textasciicircum}\{th\}\$-entry is zero for all the elements in \$C\_2\$, then the pair of punctured codes \$(C\_1{\textbar}\_i,C\_2{\textbar}\_i)\$ is also a CSS-T pair. Finally, we provide Magma code based on our results and quasi-cyclic codes as a step toward finding quantum LDPC or LDGM CSS-T codes computationally.}, abstract = {CSS-T codes were recently introduced as quantum error-correcting codes that respect a transversal gate. A CSS-T code depends on a pair \$(C\_1, C\_2)\$ of binary linear codes \$C\_1\$ and \$C\_2\$ that satisfy certain conditions. We prove that \$C\_1\$ and \$C\_2\$ form a CSS-T pair if and only if \$C\_2 {\textbackslash}subset {\textbackslash}operatorname\{Hull\}(C\_1) {\textbackslash}cap {\textbackslash}operatorname\{Hull\}(C\_1{\textasciicircum}2)\$, where the hull of a code is the intersection of the code with its dual. We show that if \$(C\_1,C\_2)\$ is a CSS-T pair, and the code \$C\_2\$ is degenerated on \${\textbackslash}\{i{\textbackslash}\}\$, meaning that the \$i{\textasciicircum}\{th\}\$-entry is zero for all the elements in \$C\_2\$, then the pair of punctured codes \$(C\_1{\textbar}\_i,C\_2{\textbar}\_i)\$ is also a CSS-T pair. 
Finally, we provide Magma code based on our results and quasi-cyclic codes as a step toward finding quantum LDPC or LDGM CSS-T codes computationally.},
publisher = {arXiv}, publisher = {arXiv},
author = {Camps-Moreno, Eduardo and López, Hiram H. and Matthews, Gretchen L. and McMillon, Emily}, author = {Camps-Moreno, Eduardo and López, Hiram H. and Matthews, Gretchen L. and McMillon, Emily},
month = jun, month = jun,
year = {2024}, year = {2024},
howpublished = {arXiv:2406.00425},
} }
@article{roffe_quantum_2019, @article{roffe_quantum_2019,
@@ -244,13 +252,14 @@
pages = {226--245}, pages = {226--245},
} }
@article{gottesman_introduction_2009, @misc{gottesman_introduction_2009,
title = {An {Introduction} to {Quantum} {Error} {Correction} and {Fault}-{Tolerant} {Quantum} {Computation}}, title = {An Introduction to Quantum Error Correction and Fault-Tolerant Quantum Computation},
doi = {10.48550/arXiv.0904.2557}, doi = {10.48550/arXiv.0904.2557},
publisher = {arXiv}, publisher = {arXiv},
author = {Gottesman, Daniel}, author = {Gottesman, Daniel},
month = apr, month = apr,
year = {2009}, year = {2009},
howpublished = {arXiv:0904.2557},
} }
@article{gottesman_theory_1998, @article{gottesman_theory_1998,
@@ -266,35 +275,38 @@
pages = {127--137}, pages = {127--137},
} }
@article{calderbank_quantum_1997, @misc{calderbank_quantum_1997,
title = {Quantum {Error} {Correction} via {Codes} over {GF}(4)}, title = {Quantum Error Correction via Codes over {GF}(4)},
doi = {10.48550/arXiv.quant-ph/9608006}, doi = {10.48550/arXiv.quant-ph/9608006},
publisher = {arXiv}, publisher = {arXiv},
author = {Calderbank, A. R. and Rains, E. M. and Shor, P. W. and Sloane, N. J. A.}, author = {Calderbank, A. R. and Rains, E. M. and Shor, P. W. and Sloane, N. J. A.},
month = sep, month = sep,
year = {1997}, year = {1997},
howpublished = {arXiv:quant-ph/9608006},
} }
@article{gottesman_stabilizer_1997, @misc{gottesman_stabilizer_1997,
title = {Stabilizer {Codes} and {Quantum} {Error} {Correction}}, title = {Stabilizer Codes and Quantum Error Correction},
doi = {10.48550/arXiv.quant-ph/9705052}, doi = {10.48550/arXiv.quant-ph/9705052},
publisher = {arXiv}, publisher = {arXiv},
author = {Gottesman, Daniel}, author = {Gottesman, Daniel},
month = may, month = may,
year = {1997}, year = {1997},
howpublished = {Ph.D. {Thesis}, arXiv:quant-ph/9705052},
} }
@article{shor_fault-tolerant_1997, @misc{shor_fault-tolerant_1997,
title = {Fault-tolerant quantum computation}, title = {Fault-tolerant quantum computation},
doi = {10.48550/arXiv.quant-ph/9605011}, doi = {10.48550/arXiv.quant-ph/9605011},
publisher = {arXiv}, publisher = {arXiv},
author = {Shor, Peter W.}, author = {Shor, Peter W.},
month = mar, month = mar,
year = {1997}, year = {1997},
howpublished = {arXiv:quant-ph/9605011},
} }
@article{divincenzo_fault-tolerant_1996, @article{divincenzo_fault-tolerant_1996,
title = {Fault-{Tolerant} {Error} {Correction} with {Efficient} {Quantum} {Codes}}, title = {Fault-Tolerant Error Correction with Efficient Quantum Codes},
volume = {77}, volume = {77},
issn = {0031-9007, 1079-7114}, issn = {0031-9007, 1079-7114},
doi = {10.1103/PhysRevLett.77.3260}, doi = {10.1103/PhysRevLett.77.3260},
@@ -335,7 +347,7 @@
} }
@article{terhal_quantum_2015, @article{terhal_quantum_2015,
title = {Quantum {Error} {Correction} for {Quantum} {Memories}}, title = {Quantum Error Correction for Quantum Memories},
volume = {87}, volume = {87},
issn = {0034-6861, 1539-0756}, issn = {0034-6861, 1539-0756},
doi = {10.1103/RevModPhys.87.307}, doi = {10.1103/RevModPhys.87.307},
@@ -353,7 +365,7 @@
title = {Guidelines for snowballing in systematic literature studies and a replication in software engineering}, title = {Guidelines for snowballing in systematic literature studies and a replication in software engineering},
isbn = {978-1-4503-2476-2}, isbn = {978-1-4503-2476-2},
doi = {10.1145/2601248.2601268}, doi = {10.1145/2601248.2601268},
booktitle = {Proceedings of the 18th {International} {Conference} on {Evaluation} and {Assessment} in {Software} {Engineering}}, booktitle = {Proceedings of the 18th International Conference on Evaluation and Assessment in Software Engineering},
publisher = {Association for Computing Machinery}, publisher = {Association for Computing Machinery},
author = {Wohlin, Claes}, author = {Wohlin, Claes},
month = may, month = may,
@@ -374,20 +386,21 @@
pages = {83--84}, pages = {83--84},
} }
@article{blume-kohout_estimating_2025, @misc{blume-kohout_estimating_2025,
title = {Estimating detector error models from syndrome data}, title = {Estimating detector error models from syndrome data},
doi = {10.48550/arXiv.2504.14643}, doi = {10.48550/arXiv.2504.14643},
publisher = {arXiv}, publisher = {arXiv},
author = {Blume-Kohout, Robin and Young, Kevin}, author = {Blume-Kohout, Robin and Young, Kevin},
month = apr, month = apr,
year = {2025}, year = {2025},
howpublished = {arXiv:2504.14643},
} }
@inproceedings{chatterjee_quantum_2023, @inproceedings{chatterjee_quantum_2023,
title = {Quantum {Error} {Correction} {For} {Dummies}}, title = {Quantum Error Correction For Dummies},
volume = {01}, volume = {01},
doi = {10.1109/QCE57702.2023.00017}, doi = {10.1109/QCE57702.2023.00017},
booktitle = {2023 {IEEE} {International} {Conference} on {Quantum} {Computing} and {Engineering} ({QCE})}, booktitle = {2023 {IEEE} International Conference on Quantum Computing and Engineering ({QCE})},
author = {Chatterjee, Avimita and Phalak, Koustubh and Ghosh, Swaroop}, author = {Chatterjee, Avimita and Phalak, Koustubh and Ghosh, Swaroop},
month = sep, month = sep,
year = {2023}, year = {2023},
@@ -395,7 +408,7 @@
} }
@inproceedings{petersen_systematic_2008, @inproceedings{petersen_systematic_2008,
title = {Systematic {Mapping} {Studies} in {Software} {Engineering}}, title = {Systematic Mapping Studies in Software Engineering},
doi = {10.14236/ewic/EASE2008.8}, doi = {10.14236/ewic/EASE2008.8},
language = {en}, language = {en},
publisher = {BCS Learning \& Development}, publisher = {BCS Learning \& Development},
@@ -405,7 +418,7 @@
} }
@article{postler_demonstration_2024, @article{postler_demonstration_2024,
title = {Demonstration of {Fault}-{Tolerant} {Steane} {Quantum} {Error} {Correction}}, title = {Demonstration of Fault-Tolerant Steane Quantum Error Correction},
volume = {5}, volume = {5},
doi = {10.1103/PRXQuantum.5.030326}, doi = {10.1103/PRXQuantum.5.030326},
number = {3}, number = {3},
@@ -418,7 +431,7 @@
} }
@article{cao_exact_2025, @article{cao_exact_2025,
title = {Exact {Decoding} of {Quantum} {Error}-{Correcting} {Codes}}, title = {Exact Decoding of Quantum Error-Correcting Codes},
volume = {134}, volume = {134},
doi = {10.1103/PhysRevLett.134.190603}, doi = {10.1103/PhysRevLett.134.190603},
number = {19}, number = {19},
@@ -431,13 +444,14 @@
} }
@misc{beni_tesseract_2025, @misc{beni_tesseract_2025,
title = {Tesseract: {A} {Search}-{Based} {Decoder} for {Quantum} {Error} {Correction}}, title = {Tesseract: {A} Search-Based Decoder for Quantum Error Correction},
shorttitle = {Tesseract}, shorttitle = {Tesseract},
doi = {10.48550/arXiv.2503.10988}, doi = {10.48550/arXiv.2503.10988},
publisher = {arXiv}, publisher = {arXiv},
author = {Beni, Laleh Aghababaie and Higgott, Oscar and Shutty, Noah}, author = {Beni, Laleh Aghababaie and Higgott, Oscar and Shutty, Noah},
month = aug, month = aug,
year = {2025}, year = {2025},
howpublished = {arXiv:2503.10988},
} }
@article{bausch_learning_2024, @article{bausch_learning_2024,
@@ -457,12 +471,13 @@
} }
@misc{bhardwaj_adaptive_2025, @misc{bhardwaj_adaptive_2025,
title = {Adaptive {Estimation} of {Drifting} {Noise} in {Quantum} {Error} {Correction}}, title = {Adaptive Estimation of Drifting Noise in Quantum Error Correction},
doi = {10.48550/arXiv.2511.09491}, doi = {10.48550/arXiv.2511.09491},
publisher = {arXiv}, publisher = {arXiv},
author = {Bhardwaj, Devansh and Takou, Evangelia and Lin, Yingjia and Brown, Kenneth R.}, author = {Bhardwaj, Devansh and Takou, Evangelia and Lin, Yingjia and Brown, Kenneth R.},
month = nov, month = nov,
year = {2025}, year = {2025},
howpublished = {arXiv:2511.09491},
} }
@article{roffe_decoding_2020, @article{roffe_decoding_2020,
@@ -492,7 +507,7 @@
} }
@article{bausch_learning_2024-1, @article{bausch_learning_2024-1,
title = {Learning to {Decode} the {Surface} {Code} with a {Recurrent}, {Transformer}-{Based} {Neural} {Network}}, title = {Learning to Decode the Surface Code with a Recurrent, Transformer-Based Neural Network},
volume = {635}, volume = {635},
issn = {0028-0836, 1476-4687}, issn = {0028-0836, 1476-4687},
doi = {10.1038/s41586-024-08148-8}, doi = {10.1038/s41586-024-08148-8},
@@ -511,15 +526,17 @@
author = {Lin, Hsiang-Ku and Lim, Pak Kau and Kovalev, Alexey A. and Pryadko, Leonid P.}, author = {Lin, Hsiang-Ku and Lim, Pak Kau and Kovalev, Alexey A. and Pryadko, Leonid P.},
month = aug, month = aug,
year = {2025}, year = {2025},
howpublished = {arXiv:2506.16910},
} }
@misc{fan_accelerating_2025, @misc{fan_accelerating_2025,
title = {Accelerating {BP}-{OSD} {Decoder} for {QLDPC} {Codes} with {Local} {Syndrome}-{Based} {Preprocessing}}, title = {Accelerating {BP}-{OSD} Decoder for {QLDPC} Codes with Local Syndrome-Based Preprocessing},
doi = {10.48550/arXiv.2509.01892}, doi = {10.48550/arXiv.2509.01892},
publisher = {arXiv}, publisher = {arXiv},
author = {Fan, Wenxuan and Suzuki, Yasunari and Ravi, Gokul Subramanian and Ueno, Yosuke and Inoue, Koji and Tanimoto, Teruo}, author = {Fan, Wenxuan and Suzuki, Yasunari and Ravi, Gokul Subramanian and Ueno, Yosuke and Inoue, Koji and Tanimoto, Teruo},
month = sep, month = sep,
year = {2025}, year = {2025},
howpublished = {arXiv:2509.01892},
} }
@misc{senior_scalable_2025, @misc{senior_scalable_2025,
@@ -529,14 +546,16 @@
author = {Senior, Andrew W. and Edlich, Thomas and Heras, Francisco J. H. and Zhang, Lei M. and Higgott, Oscar and Spencer, James S. and Applebaum, Taylor and Blackwell, Sam and Ledford, Justin and Žemgulytė, Akvilė and Žídek, Augustin and Shutty, Noah and Cowie, Andrew and Li, Yin and Holland, George and Brooks, Peter and Beattie, Charlie and Newman, Michael and Davies, Alex and Jones, Cody and Boixo, Sergio and Neven, Hartmut and Kohli, Pushmeet and Bausch, Johannes}, author = {Senior, Andrew W. and Edlich, Thomas and Heras, Francisco J. H. and Zhang, Lei M. and Higgott, Oscar and Spencer, James S. and Applebaum, Taylor and Blackwell, Sam and Ledford, Justin and Žemgulytė, Akvilė and Žídek, Augustin and Shutty, Noah and Cowie, Andrew and Li, Yin and Holland, George and Brooks, Peter and Beattie, Charlie and Newman, Michael and Davies, Alex and Jones, Cody and Boixo, Sergio and Neven, Hartmut and Kohli, Pushmeet and Bausch, Johannes},
month = dec, month = dec,
year = {2025}, year = {2025},
howpublished = {arXiv:2512.07737},
} }
@misc{wang_fully_2025, @misc{wang_fully_2025,
title = {Fully {Parallelized} {BP} {Decoding} for {Quantum} {LDPC} {Codes} {Can} {Outperform} {BP}-{OSD}}, title = {Fully Parallelized {BP} Decoding for Quantum {LDPC} Codes Can Outperform {BP}-{OSD}},
language = {en}, language = {en},
author = {Wang, Ming and Li, Ang and Mueller, Frank}, author = {Wang, Ming and Li, Ang and Mueller, Frank},
month = jun, month = jun,
year = {2025}, year = {2025},
howpublished = {arXiv:2507.00254},
} }
@misc{ye_beam_2025, @misc{ye_beam_2025,
@@ -565,7 +584,7 @@
} }
@article{higgott_improved_2023, @article{higgott_improved_2023,
title = {Improved {Decoding} of {Circuit} {Noise} and {Fragile} {Boundaries} of {Tailored} {Surface} {Codes}}, title = {Improved Decoding of Circuit Noise and Fragile Boundaries of Tailored Surface Codes},
volume = {13}, volume = {13},
doi = {10.1103/PhysRevX.13.031007}, doi = {10.1103/PhysRevX.13.031007},
number = {3}, number = {3},
@@ -578,31 +597,34 @@
} }
@misc{tsubouchi_degeneracy_2025, @misc{tsubouchi_degeneracy_2025,
title = {Degeneracy {Cutting}: {A} {Local} and {Efficient} {Post}-{Processing} for {Belief} {Propagation} {Decoding} of {Quantum} {Low}-{Density} {Parity}-{Check} {Codes}}, title = {Degeneracy Cutting: {A} Local and Efficient Post-Processing for Belief Propagation Decoding of Quantum Low-Density Parity-Check Codes},
shorttitle = {Degeneracy {Cutting}}, shorttitle = {Degeneracy {Cutting}},
doi = {10.48550/arXiv.2510.08695}, doi = {10.48550/arXiv.2510.08695},
publisher = {arXiv}, publisher = {arXiv},
author = {Tsubouchi, Kento and Yamasaki, Hayata and Tamiya, Shiro}, author = {Tsubouchi, Kento and Yamasaki, Hayata and Tamiya, Shiro},
month = oct, month = oct,
year = {2025}, year = {2025},
howpublished = {arXiv:2510.08695},
} }
@misc{lee_scalable_2025, @misc{lee_scalable_2025,
title = {Scalable {Neural} {Decoders} for {Practical} {Real}-{Time} {Quantum} {Error} {Correction}}, title = {Scalable Neural Decoders for Practical Real-Time Quantum Error Correction},
doi = {10.48550/arXiv.2510.22724}, doi = {10.48550/arXiv.2510.22724},
publisher = {arXiv}, publisher = {arXiv},
author = {Lee, Changwon and Hur, Tak and Park, Daniel K.}, author = {Lee, Changwon and Hur, Tak and Park, Daniel K.},
month = oct, month = oct,
year = {2025}, year = {2025},
howpublished = {arXiv:2510.22724},
} }
@misc{maan_decoding_2025, @misc{maan_decoding_2025,
title = {Decoding {Correlated} {Errors} in {Quantum} {LDPC} {Codes}}, title = {Decoding Correlated Errors in Quantum {LDPC} Codes},
doi = {10.48550/arXiv.2510.14060}, doi = {10.48550/arXiv.2510.14060},
publisher = {arXiv}, publisher = {arXiv},
author = {Maan, Arshpreet Singh and Herrero, Francisco-Garcia and Paler, Alexandru and Savin, Valentin}, author = {Maan, Arshpreet Singh and Herrero, Francisco-Garcia and Paler, Alexandru and Savin, Valentin},
month = oct, month = oct,
year = {2025}, year = {2025},
howpublished = {arXiv:2510.14060},
} }
@article{skoric_parallel_2023, @article{skoric_parallel_2023,
@@ -622,7 +644,7 @@
} }
@article{higgott_sparse_2025, @article{higgott_sparse_2025,
title = {Sparse {Blossom}: correcting a million errors per core second with minimum-weight matching}, title = {Sparse Blossom: correcting a million errors per core second with minimum-weight matching},
volume = {9}, volume = {9},
shorttitle = {Sparse {Blossom}}, shorttitle = {Sparse {Blossom}},
doi = {10.22331/q-2025-01-20-1600}, doi = {10.22331/q-2025-01-20-1600},
@@ -636,7 +658,7 @@
} }
@article{breuckmann_quantum_2021, @article{breuckmann_quantum_2021,
title = {Quantum {Low}-{Density} {Parity}-{Check} {Codes}}, title = {Quantum Low-Density Parity-Check Codes},
volume = {2}, volume = {2},
doi = {10.1103/PRXQuantum.2.040101}, doi = {10.1103/PRXQuantum.2.040101},
number = {4}, number = {4},
@@ -649,10 +671,10 @@
} }
@inproceedings{gokduman_erasure_2024, @inproceedings{gokduman_erasure_2024,
title = {Erasure {Decoding} for {Quantum} {LDPC} {Codes} via {Belief} {Propagation} with {Guided} {Decimation}}, title = {Erasure Decoding for Quantum {LDPC} Codes via Belief Propagation with Guided Decimation},
issn = {2836-4503}, issn = {2836-4503},
doi = {10.1109/Allerton63246.2024.10735275}, doi = {10.1109/Allerton63246.2024.10735275},
booktitle = {2024 60th {Annual} {Allerton} {Conference} on {Communication}, {Control}, and {Computing}}, booktitle = {2024 60th Annual Allerton Conference on Communication, Control, and Computing},
author = {Gökduman, Mert and Yao, Hanwen and Pfister, Henry D.}, author = {Gökduman, Mert and Yao, Hanwen and Pfister, Henry D.},
month = sep, month = sep,
year = {2024}, year = {2024},
@@ -660,13 +682,14 @@
} }
@misc{swierkowska_eccentric_2025, @misc{swierkowska_eccentric_2025,
title = {{ECCentric}: {An} {Empirical} {Analysis} of {Quantum} {Error} {Correction} {Codes}}, title = {ECCentric: An Empirical Analysis of Quantum Error Correction Codes},
shorttitle = {{ECCentric}}, shorttitle = {{ECCentric}},
doi = {10.48550/arXiv.2511.01062}, doi = {10.48550/arXiv.2511.01062},
publisher = {arXiv}, publisher = {arXiv},
author = {{\'S}wierkowska, Aleksandra and Pflieger, Jannik and Giortamis, Emmanouil and Bhatotia, Pramod}, author = {{\'S}wierkowska, Aleksandra and Pflieger, Jannik and Giortamis, Emmanouil and Bhatotia, Pramod},
month = nov, month = nov,
year = {2025}, year = {2025},
howpublished = {arXiv:2511.01062},
} }
@phdthesis{guernut_fault-tolerant_2025, @phdthesis{guernut_fault-tolerant_2025,
@@ -693,7 +716,7 @@
} }
@article{tan_scalable_2023, @article{tan_scalable_2023,
title = {Scalable {Surface}-{Code} {Decoders} with {Parallelization} in {Time}}, title = {Scalable Surface-Code Decoders with Parallelization in Time},
volume = {4}, volume = {4},
doi = {10.1103/PRXQuantum.4.040344}, doi = {10.1103/PRXQuantum.4.040344},
number = {4}, number = {4},
@@ -735,21 +758,23 @@
} }
@misc{kuo_fault-tolerant_2024, @misc{kuo_fault-tolerant_2024,
title = {Fault-{Tolerant} {Belief} {Propagation} for {Practical} {Quantum} {Memory}}, title = {Fault-Tolerant Belief Propagation for Practical Quantum Memory},
doi = {10.48550/arXiv.2409.18689}, doi = {10.48550/arXiv.2409.18689},
publisher = {arXiv}, publisher = {arXiv},
author = {Kuo, Kao-Yueh and Lai, Ching-Yi}, author = {Kuo, Kao-Yueh and Lai, Ching-Yi},
month = sep, month = sep,
year = {2024}, year = {2024},
howpublished = {arXiv:2409.18689},
} }
@misc{poor_ultra_2025, @misc{poor_ultra_2025,
title = {Ultra {Low} {Overhead} {Syndrome} {Extraction} for the {Steane} code}, title = {Ultra Low Overhead Syndrome Extraction for the Steane code},
doi = {10.48550/arXiv.2511.13700}, doi = {10.48550/arXiv.2511.13700},
publisher = {arXiv}, publisher = {arXiv},
author = {Poór, Boldizsár and Rodatz, Benjamin and Kissinger, Aleks}, author = {Poór, Boldizsár and Rodatz, Benjamin and Kissinger, Aleks},
month = nov, month = nov,
year = {2025}, year = {2025},
howpublished = {arXiv:2511.13700},
} }
@article{feynman_simulating_1982, @article{feynman_simulating_1982,
@@ -770,15 +795,16 @@
title = {Algorithms for quantum computation: discrete logarithms and factoring}, title = {Algorithms for quantum computation: discrete logarithms and factoring},
shorttitle = {Algorithms for quantum computation}, shorttitle = {Algorithms for quantum computation},
doi = {10.1109/SFCS.1994.365700}, doi = {10.1109/SFCS.1994.365700},
booktitle = {Proceedings 35th {Annual} {Symposium} on {Foundations} of {Computer} {Science}}, booktitle = {Proc. Annual Symposium on Foundations of Computer Science},
author = {Shor, P.W.}, author = {Shor, P.W.},
address = {Santa Fe},
month = nov, month = nov,
year = {1994}, year = {1994},
pages = {124--134}, pages = {124--134},
} }
@article{preskill_quantum_2018, @article{preskill_quantum_2018,
title = {Quantum {Computing} in the {NISQ} era and beyond}, title = {Quantum Computing in the {NISQ} era and beyond},
volume = {2}, volume = {2},
doi = {10.22331/q-2018-08-06-79}, doi = {10.22331/q-2018-08-06-79},
language = {en-GB}, language = {en-GB},
@@ -791,7 +817,7 @@
} }
@misc{google_quantum_ai_quantum_nodate, @misc{google_quantum_ai_quantum_nodate,
title = {Quantum {Computing} {Roadmap}}, title = {Quantum Computing Roadmap},
language = {en}, language = {en},
journal = {Google Quantum AI}, journal = {Google Quantum AI},
author = {{Google Quantum AI}}, author = {{Google Quantum AI}},
@@ -811,7 +837,7 @@
} }
@article{zhang_classical_2023, @article{zhang_classical_2023,
title = {A {Classical} {Architecture} for {Digital} {Quantum} {Computers}}, title = {A Classical Architecture for Digital Quantum Computers},
volume = {5}, volume = {5},
doi = {10.1145/3626199}, doi = {10.1145/3626199},
number = {1}, number = {1},
@@ -829,6 +855,7 @@
author = {Caune, Laura and Skoric, Luka and Blunt, Nick S. and Ruban, Archibald and McDaniel, Jimmy and Valery, Joseph A. and Patterson, Andrew D. and Gramolin, Alexander V. and Majaniemi, Joonas and Barnes, Kenton M. and Bialas, Tomasz and Buğdaycı, Okan and Crawford, Ophelia and Gehér, György P. and Krovi, Hari and Matekole, Elisha and Topal, Canberk and Poletto, Stefano and Bryant, Michael and Snyder, Kalan and Gillespie, Neil I. and Jones, Glenn and Johar, Kauser and Campbell, Earl T. and Hill, Alexander D.}, author = {Caune, Laura and Skoric, Luka and Blunt, Nick S. and Ruban, Archibald and McDaniel, Jimmy and Valery, Joseph A. and Patterson, Andrew D. and Gramolin, Alexander V. and Majaniemi, Joonas and Barnes, Kenton M. and Bialas, Tomasz and Buğdaycı, Okan and Crawford, Ophelia and Gehér, György P. and Krovi, Hari and Matekole, Elisha and Topal, Canberk and Poletto, Stefano and Bryant, Michael and Snyder, Kalan and Gillespie, Neil I. and Jones, Glenn and Johar, Kauser and Campbell, Earl T. and Hill, Alexander D.},
month = oct, month = oct,
year = {2024}, year = {2024},
howpublished = {arXiv:2410.05202},
} }
@misc{ye_beam_2025-1, @misc{ye_beam_2025-1,
@@ -838,14 +865,15 @@
author = {Ye, Min and Wecker, Dave and Delfosse, Nicolas}, author = {Ye, Min and Wecker, Dave and Delfosse, Nicolas},
month = dec, month = dec,
year = {2025}, year = {2025},
howpublished = {arXiv:2512.07057},
} }
@misc{noauthor_reproducing_nodate, @misc{noauthor_reproducing_nodate,
title = {Reproducing repetition and {Shor} code simulations using stim}, title = {Reproducing repetition and Shor code simulations using stim},
} }
@misc{noauthor_tutorial_nodate, @misc{noauthor_tutorial_nodate,
title = {Tutorial - {Estimating} the {Surface} {Code} {Threshold}{NordIQuEst} {Application} {Library}}, title = {Tutorial - Estimating the Surface Code Threshold — NordIQuEst Application Library},
} }
@misc{noauthor_simulating_nodate, @misc{noauthor_simulating_nodate,
@@ -853,7 +881,7 @@
} }
@article{ryan-anderson_realization_2021, @article{ryan-anderson_realization_2021,
title = {Realization of {Real}-{Time} {Fault}-{Tolerant} {Quantum} {Error} {Correction}}, title = {Realization of Real-Time Fault-Tolerant Quantum Error Correction},
volume = {11}, volume = {11},
doi = {10.1103/PhysRevX.11.041058}, doi = {10.1103/PhysRevX.11.041058},
number = {4}, number = {4},
@@ -880,11 +908,11 @@
} }
@misc{noauthor_tutorial_nodate-1, @misc{noauthor_tutorial_nodate-1,
title = {Tutorial - {Fault}-{Tolerant} {Quantum} {Computing} with {CSS} codes}, title = {Tutorial - Fault-Tolerant Quantum Computing with {CSS} codes},
} }
@article{panteleev_degenerate_2021, @article{panteleev_degenerate_2021,
title = {Degenerate {Quantum} {LDPC} {Codes} {With} {Good} {Finite} {Length} {Performance}}, title = {Degenerate Quantum {LDPC} Codes With Good Finite Length Performance},
volume = {5}, volume = {5},
doi = {10.22331/q-2021-11-22-585}, doi = {10.22331/q-2021-11-22-585},
language = {en-GB}, language = {en-GB},
@@ -897,27 +925,29 @@
} }
@article{babar_fifteen_2015, @article{babar_fifteen_2015,
title = {Fifteen {Years} of {Quantum} {LDPC} {Coding} and {Improved} {Decoding} {Strategies}}, title = {Fifteen Years of Quantum {LDPC} Coding and Improved Decoding Strategies},
volume = {3}, volume = {3},
issn = {2169-3536}, issn = {2169-3536},
doi = {10.1109/ACCESS.2015.2503267}, doi = {10.1109/ACCESS.2015.2503267},
journal = {IEEE Access}, journal = {IEEE Access},
author = {Babar, Zunaira and Botsinis, Panagiotis and Alanis, Dimitrios and Ng, Soon Xin and Hanzo, Lajos}, author = {Babar, Zunaira and Botsinis, Panagiotis and Alanis, Dimitrios and Ng, Soon Xin and Hanzo, Lajos},
month = nov,
year = {2015}, year = {2015},
pages = {2492--2519}, pages = {2492--2519},
} }
@misc{yao_belief_2024, @misc{yao_belief_2024,
title = {Belief {Propagation} {Decoding} of {Quantum} {LDPC} {Codes} with {Guided} {Decimation}}, title = {Belief Propagation Decoding of Quantum {LDPC} Codes with Guided Decimation},
doi = {10.48550/arXiv.2312.10950}, doi = {10.48550/arXiv.2312.10950},
publisher = {arXiv}, publisher = {arXiv},
author = {Yao, Hanwen and Laban, Waleed Abu and Häger, Christian and Amat, Alexandre Graell i and Pfister, Henry D.}, author = {Yao, Hanwen and Laban, Waleed Abu and Häger, Christian and Amat, Alexandre Graell i and Pfister, Henry D.},
month = jun, month = jun,
year = {2024}, year = {2024},
howpublished = {arXiv:2312.10950},
} }
@article{sharon_efficient_2007, @article{sharon_efficient_2007,
title = {Efficient {Serial} {Message}-{Passing} {Schedules} for {LDPC} {Decoding}}, title = {Efficient Serial Message-Passing Schedules for {LDPC} Decoding},
volume = {53}, volume = {53},
issn = {1557-9654}, issn = {1557-9654},
doi = {10.1109/TIT.2007.907507}, doi = {10.1109/TIT.2007.907507},
@@ -943,7 +973,7 @@
} }
@book{ryan_channel_2009, @book{ryan_channel_2009,
title = {Channel {Codes}: {Classical} and {Modern}}, title = {Channel Codes: Classical and Modern},
isbn = {978-1-139-48301-8}, isbn = {978-1-139-48301-8},
shorttitle = {Channel {Codes}}, shorttitle = {Channel {Codes}},
language = {en}, language = {en},
@@ -954,7 +984,7 @@
} }
@book{macwilliams_theory_1977, @book{macwilliams_theory_1977,
title = {The {Theory} of {Error}-correcting {Codes}}, title = {The Theory of Error-correcting Codes},
isbn = {978-0-444-85010-2}, isbn = {978-0-444-85010-2},
language = {en}, language = {en},
publisher = {Elsevier}, publisher = {Elsevier},
@@ -964,7 +994,7 @@
@book{richardson_modern_2008, @book{richardson_modern_2008,
address = {Cambridge}, address = {Cambridge},
title = {Modern {Coding} {Theory}}, title = {Modern Coding Theory},
isbn = {978-0-521-85229-6}, isbn = {978-0-521-85229-6},
doi = {10.1017/CBO9780511791338}, doi = {10.1017/CBO9780511791338},
publisher = {Cambridge University Press}, publisher = {Cambridge University Press},
@@ -973,7 +1003,7 @@
} }
@phdthesis{gallager_low_1960, @phdthesis{gallager_low_1960,
type = {Thesis}, type = {Ph.D. {Thesis}},
title = {Low density parity check codes}, title = {Low density parity check codes},
copyright = {M.I.T. theses are protected by copyright. They may be viewed from this source for any purpose, but reproduction or distribution in any format is prohibited without written permission. See provided URL for inquiries about permission.}, copyright = {M.I.T. theses are protected by copyright. They may be viewed from this source for any purpose, but reproduction or distribution in any format is prohibited without written permission. See provided URL for inquiries about permission.},
language = {eng}, language = {eng},
@@ -986,11 +1016,11 @@
title = {Fully parallel window decoder architecture for spatially-coupled {LDPC} codes}, title = {Fully parallel window decoder architecture for spatially-coupled {LDPC} codes},
issn = {1938-1883}, issn = {1938-1883},
doi = {10.1109/ICC.2016.7511553}, doi = {10.1109/ICC.2016.7511553},
booktitle = {2016 {IEEE} {International} {Conference} on {Communications} ({ICC})}, booktitle = {Proc. {IEEE} International Conference on Communications ({ICC})},
author = {Hassan, Najeeb Ul and Schlüter, Martin and Fettweis, Gerhard P.}, author = {Hassan, Najeeb Ul and Schlüter, Martin and Fettweis, Gerhard P.},
address = {Kuala Lumpur},
month = may, month = may,
year = {2016}, year = {2016},
pages = {1--6},
} }
@article{costello_spatially_2014, @article{costello_spatially_2014,
@@ -1019,7 +1049,7 @@
} }
@article{kang_quits_2025, @article{kang_quits_2025,
title = {{QUITS}: {A} modular {Qldpc} code {circUIT} {Simulator}}, title = {{QUITS}: {A} modular Qldpc code circUIT Simulator},
volume = {9}, volume = {9},
issn = {2521-327X}, issn = {2521-327X},
shorttitle = {{QUITS}}, shorttitle = {{QUITS}},
@@ -1033,7 +1063,7 @@
@book{griffiths_consistent_2001, @book{griffiths_consistent_2001,
address = {Cambridge}, address = {Cambridge},
title = {Consistent {Quantum} {Theory}}, title = {Consistent Quantum Theory},
isbn = {978-0-521-53929-6}, isbn = {978-0-521-53929-6},
doi = {10.1017/CBO9780511606052}, doi = {10.1017/CBO9780511606052},
publisher = {Cambridge University Press}, publisher = {Cambridge University Press},
@@ -1042,12 +1072,13 @@
} }
@misc{gottesman_fault-tolerant_2014, @misc{gottesman_fault-tolerant_2014,
title = {Fault-{Tolerant} {Quantum} {Computation} with {Constant} {Overhead}}, title = {Fault-Tolerant Quantum Computation with Constant Overhead},
doi = {10.48550/arXiv.1310.2984}, doi = {10.48550/arXiv.1310.2984},
publisher = {arXiv}, publisher = {arXiv},
author = {Gottesman, Daniel}, author = {Gottesman, Daniel},
month = jul, month = jul,
year = {2014}, year = {2014},
howpublished = {arXiv:1310.2984},
} }
@misc{gidney_new_2023, @misc{gidney_new_2023,
@@ -1057,10 +1088,11 @@
author = {Gidney, Craig and Jones, Cody}, author = {Gidney, Craig and Jones, Cody},
month = dec, month = dec,
year = {2023}, year = {2023},
howpublished = {arXiv:2312.08813},
} }
@article{gidney_fault-tolerant_2021, @article{gidney_fault-tolerant_2021,
title = {A {Fault}-{Tolerant} {Honeycomb} {Memory}}, title = {A Fault-Tolerant Honeycomb Memory},
volume = {5}, volume = {5},
issn = {2521-327X}, issn = {2521-327X},
doi = {10.22331/q-2021-12-20-605}, doi = {10.22331/q-2021-12-20-605},
@@ -1093,7 +1125,7 @@
number = {7867}, number = {7867},
journal = {Nature}, journal = {Nature},
publisher = {Nature Publishing Group}, publisher = {Nature Publishing Group},
author = {Chen, Zijun and Satzinger, Kevin J. and Atalaya, Juan and Korotkov, Alexander N. and Dunsworth, Andrew and Sank, Daniel and Quintana, Chris and McEwen, Matt and Barends, Rami and Klimov, Paul V. and Hong, Sabrina and Jones, Cody and Petukhov, Andre and Kafri, Dvir and Demura, Sean and Burkett, Brian and Gidney, Craig and Fowler, Austin G. and Paler, Alexandru and Putterman, Harald and Aleiner, Igor and Arute, Frank and Arya, Kunal and Babbush, Ryan and Bardin, Joseph C. and Bengtsson, Andreas and Bourassa, Alexandre and Broughton, Michael and Buckley, Bob B. and Buell, David A. and Bushnell, Nicholas and Chiaro, Benjamin and Collins, Roberto and Courtney, William and Derk, Alan R. and Eppens, Daniel and Erickson, Catherine and Farhi, Edward and Foxen, Brooks and Giustina, Marissa and Greene, Ami and Gross, Jonathan A. and Harrigan, Matthew P. and Harrington, Sean D. and Hilton, Jeremy and Ho, Alan and Huang, Trent and Huggins, William J. and Ioffe, L. B. and Isakov, Sergei V. and Jeffrey, Evan and Jiang, Zhang and Kechedzhi, Kostyantyn and Kim, Seon and Kitaev, Alexei and Kostritsa, Fedor and Landhuis, David and Laptev, Pavel and Lucero, Erik and Martin, Orion and McClean, Jarrod R. and McCourt, Trevor and Mi, Xiao and Miao, Kevin C. and Mohseni, Masoud and Montazeri, Shirin and Mruczkiewicz, Wojciech and Mutus, Josh and Naaman, Ofer and Neeley, Matthew and Neill, Charles and Newman, Michael and Niu, Murphy Yuezhen and OBrien, Thomas E. and Opremcak, Alex and Ostby, Eric and Pató, Bálint and Redd, Nicholas and Roushan, Pedram and Rubin, Nicholas C. and Shvarts, Vladimir and Strain, Doug and Szalay, Marco and Trevithick, Matthew D. and Villalonga, Benjamin and White, Theodore and Yao, Z. Jamie and Yeh, Ping and Yoo, Juhwan and Zalcman, Adam and Neven, Hartmut and Boixo, Sergio and Smelyanskiy, Vadim and Chen, Yu and Megrant, Anthony and Kelly, Julian and {Google Quantum AI}}, author = {{Google Quantum AI}},
month = jul, month = jul,
year = {2021}, year = {2021},
pages = {383--387}, pages = {383--387},
@@ -1120,13 +1152,15 @@
author = {Bomb{\'i}n, H{\'e}ctor and Dawson, Chris and Liu, Ye-Hua and Nickerson, Naomi and Pastawski, Fernando and Roberts, Sam}, author = {Bomb{\'i}n, H{\'e}ctor and Dawson, Chris and Liu, Ye-Hua and Nickerson, Naomi and Pastawski, Fernando and Roberts, Sam},
month = mar, month = mar,
year = {2023}, year = {2023},
howpublished = {arXiv:2303.04846},
} }
@misc{leverrier_decoding_2022, @misc{leverrier_decoding_2022,
title = {Decoding quantum {Tanner} codes}, title = {Decoding quantum Tanner codes},
doi = {10.48550/arXiv.2208.05537}, doi = {10.48550/arXiv.2208.05537},
publisher = {arXiv}, publisher = {arXiv},
author = {Leverrier, Anthony and Z{\'e}mor, Gilles}, author = {Leverrier, Anthony and Z{\'e}mor, Gilles},
month = dec, month = dec,
year = {2022}, year = {2022},
howpublished = {arXiv:2208.05537},
} }

View File

@@ -1 +1,197 @@
\chapter{Introduction} \chapter{Introduction}
\label{ch:Introduction}
\acresetall
% Intro to quantum computing
In 1982, Richard Feynman, motivated by the difficulty of simulating
quantum-mechanical systems on classical hardware, put forward the
idea of building computers that are themselves quantum mechanical
\cite{feynman_simulating_1982}.
The use of such quantum computers has since been shown to offer promising
prospects not only with regard to simulating quantum systems but also
for solving certain kinds of problems that are classically intractable.
The most prominent example is Shor's algorithm for integer
factorization \cite{shor_algorithms_1994}.
Similar to the way classical computers are built from bits and gates,
quantum computers are built from \emph{qubits} and \emph{quantum gates}.
Because of quantum entanglement, it does not suffice to consider the
qubits individually; we also have to consider correlations between them.
For a system of $n$ qubits, this makes the state space grow with
$2^n$ instead of linearly with $n$, as would be the case for a classical system
\cite[Sec.~1]{gottesman_stabilizer_1997}.
This is both the reason quantum systems are difficult to simulate and
what provides them with their power \cite[Sec.~2.1]{roffe_decoding_2020}.
% The need for QEC
Realizing algorithms that leverage these quantum-mechanical effects
requires hardware that can execute long quantum computations reliably.
This poses a problem, because the qubits making up current devices
constantly interact with their environment \cite[Sec.~1]{roffe_quantum_2019}.
This interaction acts as a continuous small-scale measurement, an
effect we call \emph{decoherence} of the stored quantum state, which
results in errors on the qubits.
Decoherence is the reason large systems do not exhibit visible quantum
properties at human scales \cite[Sec.~1]{gottesman_stabilizer_1997}.
% Intro to QEC
\Ac{qec} has emerged as a leading candidate in solving this problem.
It addresses the issue by encoding the information of $k$
\emph{logical qubits} into a larger number $n>k$ of \emph{physical
qubits}, in close analogy to classical channel coding
\cite[Sec.~1]{roffe_quantum_2019}.
The redundancy introduced this way can then be used to detect and
correct a corrupted quantum state.
The quantum setting imposes some important constraints that do not exist in the
classical case, however \cite[Sec.~2.4]{roffe_quantum_2019}:
\begin{itemize}
\item The no-cloning theorem prohibits the duplication of quantum states.
\item In addition to the bit-flip errors we know from the
classical setting, qubits are subject to \emph{phase-flips}.
\item We are not allowed to directly measure the encoded qubits,
as that would collapse their quantum states.
\end{itemize}
We can deal with the first constraint by not duplicating information
but instead spreading the quantum state across the physical qubits
\cite[Sec.~I]{calderbank_good_1996}.
To deal with phase-flip errors, we must take special care when
constructing \ac{qec} codes.
Using \ac{css} codes, for example, we can use two separate classical
binary linear codes to protect against the two kinds of errors
\cite[Sec. 10.5.6]{nielsen_quantum_2010}.
Finally, we can get around the last issue by using \emph{stabilizer
measurements}.
These are parity measurements that give us information about
potential errors without revealing the underlying qubit states
\cite[Sec.~II.C.]{babar_fifteen_2015}.
This way, we perform a \emph{syndrome extraction} and base the
subsequent decoding process on the measured syndrome.
Another difference between \ac{qec} and classical channel coding is
the resource constraints.
For \ac{qec}, achieving low latency matters more than having a low
overall computational complexity, due to the backlog problem
\cite[Sec.~II.G.3.]{terhal_quantum_2015}: Certain gates turn
single-qubit errors into multi-qubit ones, so errors must be
corrected beforehand.
A \ac{qec} system that is too slow accumulates a backlog at these points,
causing exponential slowdown.
Several code constructions have been proposed for \ac{qec} codes over the years.
Topological codes, such as surface codes, have been the industry
standard for experimental applications for a long time
\cite[Sec.~I]{koutsioumpas_colour_2025}, due to their
reliance on only local connections between qubits
\cite[Sec.~5]{roffe_decoding_2020}.
Recently, \ac{qldpc} codes have been getting increasing
attention as they have been shown to offer comparable thresholds with
substantially improved encoding rates \cite[Sec.~1]{bravyi_high-threshold_2024}.
\ac{qldpc} codes are generally decoded using a syndrome-based variant
of the \ac{bp} algorithm \cite[Sec.~1]{roffe_decoding_2020}.
We focus on \ac{qldpc} codes in our work and specifically \ac{bb} codes,
as they are promising candidates for practical \ac{qec} due to their high
encoding rates, large minimum distances, and short-depth syndrome
extraction circuits \cite[Sec.~1]{bravyi_high-threshold_2024}.
% DEMs and fault tolerance
The syndrome extraction itself is implemented on quantum hardware and
is therefore subject to the same noise as the data qubits.
As a consequence, the \ac{qec} procedure, meant to protect the quantum
state, itself introduces new \emph{internal errors}.
A procedure is called \emph{fault-tolerant} if it remains effective
even in the presence of these internal errors
\cite[Sec.~4]{gottesman_introduction_2009}.
To deal with internal errors that flip syndrome bits, multiple rounds
of syndrome measurements are performed.
One approach to implementing fault tolerance is using \acp{dem}.
A \ac{dem} abstracts away the underlying circuit,
focusing only on the relationship between possible errors
and their effects on the syndrome \cite[Sec.~1.4.3]{higgott_practical_2024}.
A \emph{detector error matrix} is generated from the circuit, which is
used for decoding instead of the original check matrix.
The detector error matrix is much larger than the
check matrix of the underlying code, since it needs to represent many
more error locations.
For example, in our experiments using the $\llbracket 144,12,12
\rrbracket$ \ac{bb} code with $12$ syndrome measurement rounds, the
number of \acp{vn} grew from $144$ to $9504$ and the number of
\acp{cn} grew from $72$ to $1008$.
Therefore, decoding under a \ac{dem} poses a challenge with respect to the
latency constraint.
To keep the latency of \ac{dem} decoding manageable, one approach is
\emph{sliding-window decoding}.
Instead of decoding on the entire detector error matrix at once,
it is partitioned into several overlapping windows.
Once decoding of one window is complete, error estimates on the initial part
that is no longer needed are committed, and the next window is processed.
This way, decoding can start as soon as the syndrome bits required
for the first window have been extracted.
The idea originates with the \emph{overlapping recovery} scheme
proposed for the surface code in
\cite[Sec.~IV.B]{dennis_topological_2002} and has since been studied
for surface and toric codes \cite{kuo_fault-tolerant_2024} as well as
for \ac{qldpc} codes under both phenomenological and circuit-level
noise \cite{huang_increasing_2024,gong_toward_2024,kang_quits_2025}.
% Research gap + our work
We observe a structural similarity between sliding-window decoding for
\acp{dem} and window decoding for \ac{sc}-\acs{ldpc} codes.
In contrast to the latter, however, where \ac{bp} messages are
carried between windows \cite[Sec.~III.~C.]{hassan_fully_2016},
the existing realizations of sliding-window decoding for \ac{qec}
discard the soft information produced inside one window before moving
to the next.
We propose \emph{warm-start sliding-window decoding}, in which the
\ac{bp} messages from the overlap region of the previous window are
reused to initialize \ac{bp} in the current window in place of the
standard cold-start initialization.
We formulate the warm start for standard \ac{bp} and for
\ac{bpgd}, a variant of \ac{bp} with better convergence properties
for \ac{qec} codes.
The decoders are evaluated by Monte Carlo simulation on the
$\llbracket 144,12,12 \rrbracket$ \ac{bb} code under standard
circuit-based depolarizing noise over $12$ syndrome extraction rounds.
The main finding is that warm-starting yields a consistent
improvement at low iteration budgets, which is the regime relevant for
low-latency operation.
% Outline of the Thesis
This thesis is structured as follows:
\Cref{ch:Fundamentals} reviews the fundamentals of classical and
quantum error correction.
On the classical side, it covers binary linear block codes,
\ac{ldpc} and \ac{sc}-\ac{ldpc} codes, and the \ac{bp} decoding
algorithm.
On the quantum side, it introduces the relevant quantum mechanical
notation, stabilizer measurements, stabilizer codes, \acf{css} codes,
\ac{qldpc} codes, and the \ac{bpgd} algorithm.
\Cref{ch:Fault tolerance} introduces fault-tolerant \ac{qec}.
It formalizes the notion of fault tolerance, presents the noise
models considered in this work, and develops the \ac{dem} formalism
through the measurement syndrome matrix, the detector matrix, and the
detector error matrix.
The chapter closes with a discussion of practical considerations
including the choice of noise model, the per-round \acf{ler}, and the
Stim toolchain.
\Cref{ch:Decoding} considers practical aspects of decoding under \acp{dem}.
It reviews the existing literature on sliding-window decoding for
\ac{qec}, develops the formal windowing construction we build upon,
introduces the proposed warm-start sliding-window decoder for
plain \ac{bp} and for \ac{bpgd}, and reports numerical results on the
$\llbracket 144,12,12 \rrbracket$ \ac{bb} code.
% TODO: Possibly extend to mention specific proposed research directions
\Cref{ch:Conclusion} concludes the thesis and outlines directions for
further research.

File diff suppressed because it is too large Load Diff

View File

@@ -16,17 +16,19 @@ using qubits.
While the use of error correcting codes may facilitate this, it also While the use of error correcting codes may facilitate this, it also
introduces two new challenges \cite[Sec.~4]{gottesman_introduction_2009}: introduces two new challenges \cite[Sec.~4]{gottesman_introduction_2009}:
\begin{itemize} \begin{itemize}
\item We must be able to perform operations on the encoded state \item To realize a quantum algorithm, we must be able to
in such a way that we do not lose the protection against errors. perform operations on the encoded state in such a way that we
\item \ac{qec} systems are themselves partially implemented in do not lose the protection against errors.
quantum hardware. In addition to the errors we have \item \ac{qec} systems, in particular the syndrome extraction
originally introduced them for, these systems must circuit, are themselves partially implemented in
be able to account for the fact they are implemented on noisy quantum hardware.
hardware themselves. In addition to the errors we have originally introduced them
for, these systems must therefore be able to account for the
fact they are implemented on noisy hardware themselves.
\end{itemize} \end{itemize}
In the literature, both of these points are viewed under the umbrella In the literature, both of these points are viewed under the umbrella
of \emph{fault tolerance}. of \emph{fault-tolerant} quantum computing.
We focus only on the second aspect in this work. In this thesis, we focus on the second aspect.
It was recognized early on as a challenge of \ac{qec} that the correction It was recognized early on as a challenge of \ac{qec} that the correction
machinery itself may introduce new faults \cite[Sec.~III]{shor_scheme_1995}. machinery itself may introduce new faults \cite[Sec.~III]{shor_scheme_1995}.
@@ -43,16 +45,16 @@ address both.
We model the possible occurrence of errors during any processing We model the possible occurrence of errors during any processing
stage as different \emph{error locations} $E_i,~i\in [1:N]$ stage as different \emph{error locations} $E_i,~i\in [1:N]$
in the circuit. in the circuit.
$N \in \mathbb{N}$ is the total number of considered error locations. The parameter $N \in \mathbb{N}$ is the total number of considered
error locations.
The \emph{circuit error vector} $\bm{e} \in \{0,1\}^N$ is a vector The \emph{circuit error vector} $\bm{e} \in \{0,1\}^N$ is a vector
indicating which errors occurred, with indicating which errors occurred, with
\begin{align*} \begin{align*}
e_i := e_i :=
\begin{cases} \begin{cases}
1, & \text{Error $E_i$ occurred} \\ 1, & \text{error $E_i$ occurred}, \\
0, & \text{otherwise} 0, & \text{otherwise}.
\end{cases} \end{cases}
.%
\end{align*} \end{align*}
\Cref{fig:fault_tolerance_overview} illustrates the flow of errors. \Cref{fig:fault_tolerance_overview} illustrates the flow of errors.
Specifically for \ac{css} codes, a \ac{qec} procedure is deemed Specifically for \ac{css} codes, a \ac{qec} procedure is deemed
@@ -72,12 +74,14 @@ fault-tolerant, if \cite[Def.~4.2]{derks_designing_2025}
where $t = \lfloor (d_\text{min} -1)/2 \rfloor$ is the number of where $t = \lfloor (d_\text{min} -1)/2 \rfloor$ is the number of
errors the code is able to correct. errors the code is able to correct.
The vectors $\bm{e}_{\text{output},X}$ and $\bm{e}_{\text{output},Z}$ The vectors $\bm{e}_{\text{output},X}$ and $\bm{e}_{\text{output},Z}$
denote only $X$ and $Z$ errors respectively. denote only $X$ and $Z$ errors, respectively.
% TODO: Properly introduce d_min for QEC, specifically for CSS codes % TODO: Properly introduce d_min for QEC, specifically for CSS codes
In order to deal with internal errors that flip syndrome bits, In order to deal with internal errors that flip syndrome bits,
multiple rounds of syndrome measurements must be performed. multiple rounds of syndrome measurements are performed.
Typically, the number of syndrome extraction rounds is chosen as $d_\text{min}$. Typically, the number of syndrome extraction rounds is chosen as
$d_\text{min}$, e.g., \cite{gong_toward_2024}
\cite{koutsioumpas_automorphism_2025}.
% % This is the definition of a fault-tolerant QEC gadget % % This is the definition of a fault-tolerant QEC gadget
% A \ac{qec} procedure is deemed fault tolerant if % A \ac{qec} procedure is deemed fault tolerant if
@@ -150,7 +154,7 @@ Typically, the number of syndrome extraction rounds is chosen as $d_\text{min}$.
% Intro % Intro
We collect the probabilities of error at each location in the We collect the probabilities of error at each location in the
\emph{noise model}, a vector $\bm{p} \in [0,1]^N$. \emph{noise model}, represented by a vector $\bm{p} \in [0,1]^N$.
There are different types of noise models, each allowing for There are different types of noise models, each allowing for
different error locations in the circuit. different error locations in the circuit.
@@ -178,8 +182,7 @@ $\ket{\psi}_\text{L}$ as \emph{data qubits}.
Note that this is a concrete implementation using CNOT gates, as Note that this is a concrete implementation using CNOT gates, as
opposed to the system-level view introduced in opposed to the system-level view introduced in
\Cref{subsec:Stabilizer Codes}. \Cref{subsec:Stabilizer Codes}.
We visualize the different types of noise models in \Cref{fig:noise_model_types} visualizes the different types of noise models.
\Cref{fig:noise_model_types}.
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
\subsection{Bit-Flip Noise} \subsection{Bit-Flip Noise}
@@ -188,9 +191,11 @@ We visualize the different types of noise models in
The simplest type of noise model is \emph{bit-flip} noise. The simplest type of noise model is \emph{bit-flip} noise.
This corresponds to the classical \ac{bsc}, i.e., only $X$ errors on the This corresponds to the classical \ac{bsc}, i.e., only $X$ errors on the
data qubits are possible \cite[Appendix~A]{gidney_new_2023}. data qubits are possible \cite[Appendix~A]{gidney_new_2023}.
This type of noise model is shown in \Cref{subfig:bit_flip}. The occurrence of bit-flip errors is modeled as a Bernoulli process
$\text{Bern}(p)$.
\Cref{subfig:bit_flip} shows this type of noise model.
Note that we cannot use bit-flip noise to develop fault-tolerant Note that bit-flip noise is not suitable for developing fault-tolerant
systems, as it does not account for errors during the syndrome extraction. systems, as it does not account for errors during the syndrome extraction.
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
@@ -221,7 +226,7 @@ Here, we consider multiple rounds of syndrome measurements with a
depolarizing channel before each round. depolarizing channel before each round.
Additionally, we allow for measurement errors by having $X$ error Additionally, we allow for measurement errors by having $X$ error
locations right before each measurement \cite[Appendix~A]{gidney_new_2023}. locations right before each measurement \cite[Appendix~A]{gidney_new_2023}.
Note that it is enough to only consider $X$ errors at these points, Note that it is enough to only consider $X$ errors before measuring,
since that is the only type of error directly affecting the since that is the only type of error directly affecting the
measurement outcomes. measurement outcomes.
This model is depicted in \Cref{subfig:phenomenological}. This model is depicted in \Cref{subfig:phenomenological}.
@@ -243,15 +248,15 @@ Here we not only consider noise between syndrome extraction rounds
and at the measurements, but at each gate. and at the measurements, but at each gate.
Specifically, we allow arbitrary $n$-qubit Pauli errors after each Specifically, we allow arbitrary $n$-qubit Pauli errors after each
$n$-qubit gate \cite[Def.~2.5]{derks_designing_2025}. $n$-qubit gate \cite[Def.~2.5]{derks_designing_2025}.
An $n$-qubit Pauli error is simply a series of correlated Pauli An $n$-qubit Pauli error can be written as a series of correlated Pauli
errors on each related individual qubit. errors on each related individual qubit.
This type of noise model is shown in \Cref{subfig:circuit_level}. This type of noise model is shown in \Cref{subfig:circuit_level}.
While phenomenological noise is useful for some design aspects of While phenomenological noise is useful for some design aspects of
fault tolerant circuitry, for simulations, circuit-level noise should fault-tolerant circuitry, for simulations, circuit-level noise should
always be used \cite[Sec.~4.2]{derks_designing_2025}. always be used \cite[Sec.~4.2]{derks_designing_2025}.
Note that this introduces new challenges during the decoding process, Note that this introduces new challenges during the decoding process,
as the decoding complexity is increased considerably due to the many as the decoding complexity is considerably increased due to the many
error locations. error locations.
\begin{figure}[t] \begin{figure}[t]
@@ -282,11 +287,11 @@ error locations.
framework for framework for
passing information about a circuit used for \ac{qec} to a decoder. passing information about a circuit used for \ac{qec} to a decoder.
They are also useful as a theoretical tool to aid in the design of They are also useful as a theoretical tool to aid in the design of
fault-tolerant \ac{qec} schemes. fault-tolerant \ac{qec} schemes, e.g., they can be used to easily
E.g., they can be used to easily determine whether a measurement determine whether a measurement schedule is fault-tolerant
schedule is fault-tolerant \cite[Example~12]{derks_designing_2025}. \cite[Example~12]{derks_designing_2025}.
Other approaches of implementing fault tolerance exist, such as Other approaches to implementing fault-tolerant circuits exist, e.g.,
flag error correction, which uses additional ancilla qubits to detect flag error correction, which uses additional ancilla qubits to detect
potentially damaging high-weight errors \cite[Sec.~1]{chamberland_flag_2018}. potentially damaging high-weight errors \cite[Sec.~1]{chamberland_flag_2018}.
However, \acp{dem} offer some unique advantages However, \acp{dem} offer some unique advantages
@@ -300,8 +305,7 @@ However, \acp{dem} offer some unique advantages
treated in a unified manner. This leads to a more powerful treated in a unified manner. This leads to a more powerful
description of the overall circuit. description of the overall circuit.
\end{itemize} \end{itemize}
In this work, we only consider the process of decoding under the In this work, we consider the process of decoding under the \ac{dem} framework.
\ac{dem} framework.
% Core idea % Core idea
@@ -309,7 +313,7 @@ To achieve fault tolerance, the goal we strive towards is to
consider the internal errors in addition to the input errors during consider the internal errors in addition to the input errors during
the decoding process. the decoding process.
The core idea behind detector error models is to do this by defining The core idea behind detector error models is to do this by defining
a new \emph{circuit code} that describes the circuit. a new \emph{circuit code} describing the whole circuit.
Each \ac{vn} of this new code corresponds to an error location in the Each \ac{vn} of this new code corresponds to an error location in the
circuit and each \ac{cn} corresponds to a syndrome measurement. circuit and each \ac{cn} corresponds to a syndrome measurement.
% This circuit code, combined with the prior probabilities of error % This circuit code, combined with the prior probabilities of error
@@ -445,12 +449,11 @@ matrix} $\bm{\Omega} \in \mathbb{F}_2^{M\times N}$, with
\begin{align*} \begin{align*}
\Omega_{\ell,i} = \Omega_{\ell,i} =
\begin{cases} \begin{cases}
1, & \text{Error $i$ flips measurement $\ell$}\\ 1, & \text{error $i$ flips measurement $\ell$},\\
0, & \text{otherwise} 0, & \text{otherwise},
\end{cases} \end{cases}
,%
\end{align*} \end{align*}
where $M \in \mathbb{N}$ is the number of measurements. where $M \in \mathbb{N}$ is the number of performed syndrome measurements.
To obtain $\bm{\Omega}$, we must propagate Pauli errors through the To obtain $\bm{\Omega}$, we must propagate Pauli errors through the
circuit, tracking which measurements they affect circuit, tracking which measurements they affect
\cite[Sec.~2.4]{derks_designing_2025}. \cite[Sec.~2.4]{derks_designing_2025}.
@@ -459,15 +462,22 @@ circuit, tracking which measurements they affect
We turn to our example of the three-qubit repetition code to We turn to our example of the three-qubit repetition code to
illustrate the construction of the syndrome measurement matrix. illustrate the construction of the syndrome measurement matrix.
We begin by extending our check matrix in \Cref{eq:rep_code_H} We begin by extending our check matrix $\bm{H}_Z$ in
to represent three rounds of syndrome extraction. \Cref{eq:rep_code_H} to represent three rounds of syndrome extraction.
Each round yields an additional set of syndrome bits, Each round yields an additional set of syndrome bits,
and we combine them by stacking them in a new vector and we combine them by stacking them in a new vector
$\bm{s} \in \mathbb{F}_2^{R(n-k)}$. $\bm{s} \in \mathbb{F}_2^{R(n-k)}$, where $R \in \mathbb{N}$ is the
We thus have to replicate the rows of $\bm{\Omega}$, once for each number of syndrome measurement rounds.
additional syndrome measurement, to obtain Thus, we have to replicate the rows of $\bm{H}_Z$, once for each
additional syndrome measurement, and obtain
\begin{align*} \begin{align*}
\bm{\Omega} = \bm{\Omega}_0 =
\begin{pmatrix}
\bm{H}_Z \\
\bm{H}_Z \\
\bm{H}_Z
\end{pmatrix}
=
\begin{pmatrix} \begin{pmatrix}
1 & 1 & 0 \\ 1 & 1 & 0 \\
0 & 1 & 1 \\ 0 & 1 & 1 \\
@@ -482,31 +492,31 @@ additional syndrome measurement, to obtain
depicts the corresponding circuit. depicts the corresponding circuit.
Note that we have not yet introduced error locations in the syndrome Note that we have not yet introduced error locations in the syndrome
extraction circuitry, so we still consider only bit flip noise at this stage. extraction circuitry, so we still consider only bit flip noise at this stage.
Recall that $\bm{\Omega}$ describes which \ac{vn} is connected to Recall that $\bm{\Omega}_0$ describes which \ac{vn} is connected to
which parity check and the syndrome indicates which parity checks which parity check and the syndrome indicates which parity checks
are violated. are violated.
This means that if an error exists at only a single \ac{vn}, we can Therefore, if an error occurs that corresponds to a single \ac{vn},
read off the syndrome in the corresponding column. the measured syndrome is the corresponding column.
If errors occur at multiple locations, the resulting syndrome will be If errors occur at multiple locations, the resulting syndrome will be
the linear combination of the respective columns. the linear combination of the respective columns.
We thus have Thus, we have
\begin{align*} \begin{align*}
\bm{s} \in \text{span} \{\bm{\Omega}\} \bm{s} \in \text{span} \{\bm{\Omega}_0\}
.% .%
\end{align*} \end{align*}
% Expand to phenomenological % Expand to phenomenological
We now wish to expand the error model to phenomenological noise, though Next, we expand the error model to phenomenological noise, though
only considering $X$ errors in this case. only considering $X$ errors in this case.
We introduce new error locations at the appropriate positions, We introduce new error locations at the appropriate positions,
arriving at the circuit depicted in resulting in the circuit depicted in
\Cref{fig:rep_code_multiple_rounds_phenomenological}. \Cref{fig:rep_code_multiple_rounds_phenomenological}.
For each additional error location, we extend $\bm{\Omega}$ by For each additional error location, we extend $\bm{\Omega}_0$ by
appending the corresponding syndrome vector as a column. appending the corresponding syndrome vector as a column, yielding
\begin{gather} \begin{gather}
\label{eq:syndrome_matrix_ex} \label{eq:syndrome_matrix_ex}
\bm{\Omega} = \bm{\Omega}_1 =
\left( \left(
\begin{array}{ccccccccccccccc} \begin{array}{ccccccccccccccc}
1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0
@@ -523,24 +533,25 @@ appending the corresponding syndrome vector as a column.
& 0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 & 0 & 1
\end{array} \end{array}
\right) . \\[-6mm] \right) . \\[-6mm]
\hspace*{-58.7mm} \hspace*{-56.7mm}
\underbrace{ \underbrace{
\phantom{ \phantom{
\begin{array}{ccc} \begin{array}{ccc}
0 & 0 & 0 0 & 0 & 0
\end{array} \end{array}
} }
}_\text{Original matrix} }_{\bm{\Omega}_0} \nonumber
\end{gather} \end{gather}
Notice that the first three columns correspond to the original Notice that the first three columns correspond to the original
measurement syndrome matrix, as these columns correspond to the error measurement syndrome matrix $\bm{\Omega}_0$, as these columns
locations on the data qubits. correspond to the error locations on the data qubits.
In this example, all measurements we considered were syndrome measurements. In this example, all measurements we considered were syndrome measurements.
Assuming no errors, the results of those measurements were Assuming no errors, the results of those measurements are
deterministic, irrespective of the actual logical state deterministic: They are not subject to any probabilistic behavior
$\ket{\psi}_\text{L}$, as they only depend on whether despite the quantum mechanical nature of the underlying system.
$\ket{\psi}_\text{L} \in \mathcal{C}$, not on the concrete state. They only depend on whether $\ket{\psi}_\text{L} \in \mathcal{C}$,
not on the concrete state.
It is, in general, possible to also consider non-deterministic measurements. It is, in general, possible to also consider non-deterministic measurements.
As an example, it is usual to consider a round of noiseless As an example, it is usual to consider a round of noiseless
measurements of the actual data qubit states after the last syndrome measurements of the actual data qubit states after the last syndrome
@@ -557,7 +568,7 @@ extraction round.
\centering \centering
\begin{tikzpicture} \begin{tikzpicture}
\node{$% \node{$%
\bm{\Omega} = \bm{\Omega}_0 =
\begin{pmatrix} \begin{pmatrix}
1 & 1 & 0 \\ 1 & 1 & 0 \\
0 & 1 & 1 \\ 0 & 1 & 1 \\
@@ -659,7 +670,7 @@ extraction round.
\begin{figure}[t] \begin{figure}[t]
\begin{gather*} \begin{gather*}
\hspace*{-33.3mm}% \hspace*{-31.8mm}%
\begin{array}{c} \begin{array}{c}
E_6 \\ E_6 \\
\downarrow \downarrow
@@ -667,7 +678,7 @@ extraction round.
\end{gather*} \end{gather*}
\vspace*{-8mm} \vspace*{-8mm}
\begin{gather*} \begin{gather*}
\bm{\Omega} = \bm{\Omega}_1 =
\left( \left(
\begin{array}{ \begin{array}{
cccccc% cccccc%
@@ -761,10 +772,10 @@ Instead of using stabilizer measurement results directly, we
generalize the notion of what constitutes a parity check slightly. generalize the notion of what constitutes a parity check slightly.
We formally define a \emph{detector} as a deterministic parity constraint on We formally define a \emph{detector} as a deterministic parity constraint on
a set of measurement outcomes \cite[Def.~2.1]{derks_designing_2025}. a set of measurement outcomes \cite[Def.~2.1]{derks_designing_2025}.
It can be seen that we will have as many linearly
independent detectors as there are separate deterministic measurements.
In the most straightforward case, we may simply use the stabilizer In the most straightforward case, we may simply use the stabilizer
measurements as detectors. measurements as detectors.
We immediately recognize that we will have as many linearly
independent detectors as there are separate deterministic measurements.
We generally aim to utilize the maximum number of linearly We generally aim to utilize the maximum number of linearly
independent detectors \cite[Sec.~2.2]{derks_designing_2025}. independent detectors \cite[Sec.~2.2]{derks_designing_2025}.
@@ -775,23 +786,22 @@ the \emph{detector matrix} $\bm{D} \in \mathbb{F}_2^{D\times M}$
\cite[Def.~2.2]{derks_designing_2025}, with $~D\in \mathbb{N}$ \cite[Def.~2.2]{derks_designing_2025}, with $~D\in \mathbb{N}$
denoting the number of detectors. denoting the number of detectors.
Similar to the way a \ac{pcm} associates bits with parity checks, the Similar to the way a \ac{pcm} associates bits with parity checks, the
detector matrix links measurements and detectors. detector matrix links measurement outcomes and detectors.
Each column corresponds to a measurement, while each rows corresponds Each column corresponds to a measurement, while each row corresponds
to a detector. to a detector.
We should note at this point that the combination of measurements We should note at this point that the combination of measurements
into detectors has no bearing on the actual construction of the into detectors has no bearing on the actual construction of the
syndrome extraction circuitry. syndrome extraction circuitry.
It is something that happens ``virtually'' after the fact and only It is something that happens ``virtually'' and only affects the decoder.
affects the decoder.
Note that we can use the detector matrix $\bm{D}$ to describe the set Note that we can use the detector matrix $\bm{D}$ to describe the set
of possible measurement outcomes under the absence of noise. of possible measurement outcomes under the absence of noise.
The same way we use a \ac{pcm} to describe the code space as Similar to the way we use a \ac{pcm} to describe the code space as
\begin{align*} \begin{equation*}
\mathcal{C} \mathcal{C}
= \{ \bm{x} \in \mathbb{F}_2^{n} : \bm{H}\bm{x}^\text{T} = \bm{0} \} = \{ \bm{x} \in \mathbb{F}_2^{n} : \bm{H}\bm{x}^\mathsf{T} = \bm{0} \}
,% ,%
\end{align*} \end{equation*}
the set of possible measurement outcomes is simply $\text{kern}\{\bm{D}\}$ the set of possible measurement outcomes is simply $\text{kern}\{\bm{D}\}$
\cite[Sec.~2.2]{derks_designing_2025}. \cite[Sec.~2.2]{derks_designing_2025}.
@@ -806,7 +816,7 @@ affect the measurements (through $\bm{\Omega}$), and we know how the
measurements relate to the detectors (through $\bm{D}$). measurements relate to the detectors (through $\bm{D}$).
For decoding, we are interested in the effect of the errors on the For decoding, we are interested in the effect of the errors on the
detectors directly. detectors directly.
We thus construct the \emph{detector error matrix} $\bm{H} \in Thus, we construct the \emph{detector error matrix} $\bm{H} \in
\mathbb{F}_2^{D\times N}$ \cite[Def.~2.9]{derks_designing_2025} as \mathbb{F}_2^{D\times N}$ \cite[Def.~2.9]{derks_designing_2025} as
\begin{align*} \begin{align*}
\bm{H} := \bm{D}\bm{\Omega} \bm{H} := \bm{D}\bm{\Omega}
@@ -834,10 +844,10 @@ violate the same set of detectors, i.e.,
\begin{align*} \begin{align*}
\hspace{-15mm} \hspace{-15mm}
% tex-fmt: off % tex-fmt: off
&& \bm{H} \bm{e}_1^\text{T} & \neq \bm{H} \bm{e}_2^\text{T} \\ && \bm{H} \bm{e}_1^\mathsf{T} & \neq \bm{H} \bm{e}_2^\mathsf{T} \\
\iff \hspace{-33mm} && \bm{H} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \neq 0 \\ \iff \hspace{-33mm} && \bm{H} \left( \bm{e}_1 - \bm{e}_2 \right)^\mathsf{T} & \neq 0 \\
\iff \hspace{-33mm} && \bm{D} \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \neq 0 \\ \iff \hspace{-33mm} && \bm{D} \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\mathsf{T} & \neq 0 \\
\iff \hspace{-33mm} && \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\text{T} & \notin \text{kern} \{\bm{D}\} \iff \hspace{-33mm} && \bm{\Omega} \left( \bm{e}_1 - \bm{e}_2 \right)^\mathsf{T} & \notin \text{kern} \{\bm{D}\}
% tex-fmt: on % tex-fmt: on
.% .%
\end{align*} \end{align*}
@@ -850,7 +860,7 @@ It may, however, change the decoding performance when using a practical decoder.
What constitutes a good set of detectors is difficult to assess What constitutes a good set of detectors is difficult to assess
without performing explicit decoding simulations, since it ultimately without performing explicit decoding simulations, since it ultimately
depends on the decoder employed. depends on the employed decoder.
For iterative decoders, high sparsity is generally beneficial, but For iterative decoders, high sparsity is generally beneficial, but
finding detectors that maximize sparsity is an NP-complete problem finding detectors that maximize sparsity is an NP-complete problem
\cite[Sec.~2.6]{derks_designing_2025}. \cite[Sec.~2.6]{derks_designing_2025}.
@@ -859,7 +869,7 @@ at a later stage.
To the measurement results from each syndrome extraction round we To the measurement results from each syndrome extraction round we
can add the results from the previous round, as illustrated in can add the results from the previous round, as illustrated in
\Cref{fig:detectors_from_measurements_general}. \Cref{fig:detectors_from_measurements_general}.
We thus have $D=n-k$. Thus, we have $D=n-k$.
Concretely, we denote the outcome of Concretely, we denote the outcome of
measurement $\ell \in [1:n-k]$ in round $r \in [1:R]$ by measurement $\ell \in [1:n-k]$ in round $r \in [1:R]$ by
$m_\ell^{(r)} \in \mathbb{F}_2$ $m_\ell^{(r)} \in \mathbb{F}_2$
@@ -915,7 +925,8 @@ with $\bm{m}^{(0)} = \bm{0}$.
We again turn our attention to the three-qubit repetition code. We again turn our attention to the three-qubit repetition code.
In \Cref{fig:rep_code_multiple_rounds_phenomenological} we can see In \Cref{fig:rep_code_multiple_rounds_phenomenological} we can see
that $E_6$ has occurred and has subsequently tripped the last four measurements. that $E_6$ has occurred and has subsequently triggered the last four
measurements.
We now take those measurements and combine them according to We now take those measurements and combine them according to
\Cref{eq:measurement_combination}. \Cref{eq:measurement_combination}.
We can see this process graphically in We can see this process graphically in
@@ -923,19 +934,20 @@ We can see this process graphically in
To understand why this way of defining the detectors is useful, we To understand why this way of defining the detectors is useful, we
note that the error $E_6$ in note that the error $E_6$ in
\Cref{fig:rep_code_multiple_rounds_phenomenological} has not only \Cref{fig:rep_code_multiple_rounds_phenomenological} has not only
tripped the measurements in the syndrome extraction round immediately triggered the measurements in the syndrome extraction round immediately
afterwards, but all subsequent ones as well. afterwards, but all subsequent ones as well.
To only see errors in the rounds immediately following them, we To only see the effect of errors in the syndrome measurement round
consider our newly defined detectors instead of the measurements, immediately following them, we consider our newly defined detectors
that effectively compute the difference between the measurements. instead of the measurements.
These effectively compute the difference between the measurements.
Each error can only trip syndrome bits that follow it. Each error can only trigger syndrome bits that follow it.
This is reflected in the triangular structure of $\bm{\Omega}$ in This is reflected in the triangular structure of $\bm{\Omega}$ in
\Cref{eq:syndrome_matrix_ex}. \Cref{eq:syndrome_matrix_ex}.
Combining the measurements into detectors according to Combining the measurements into detectors according to
\Cref{eq:measurement_combination}, we are effectively performing \Cref{eq:measurement_combination}, we are effectively performing
row additions in such a way as to clear the bottom left of the matrix. row additions in such a way as to clear the bottom left of the matrix.
The detector error matrix The resulting detector error matrix
\begin{align*} \begin{align*}
\bm{H} = \bm{H} =
\left( \left(
@@ -949,7 +961,7 @@ The detector error matrix
\end{array} \end{array}
\right) \right)
\end{align*} \end{align*}
we obtain this way has a block-diagonal structure. has a block-diagonal structure.
Note that we exploit the fact that each syndrome measurement round is Note that we exploit the fact that each syndrome measurement round is
identical to obtain this structure. identical to obtain this structure.
@@ -998,9 +1010,8 @@ error matrix $\bm{H}$ and the noise model $\bm{p}$.
\cite[Sec.~6]{derks_designing_2025}. \cite[Sec.~6]{derks_designing_2025}.
It serves as an abstract representation of a circuit and can be used It serves as an abstract representation of a circuit and can be used
both to transfer information to a decoder but also to aid in the both to transfer information to a decoder but also to aid in the
design of fault-tolerant systems. design of fault-tolerant systems, e.g., it can be used to investigate
E.g., it can be used to investigate the properties of a circuit with the properties of a circuit with respect to fault tolerance.
respect to fault tolerance.
It contains all information necessary for the decoding process. It contains all information necessary for the decoding process.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1030,11 +1041,11 @@ measurements) models \cite[Sec.~2.1]{gidney_fault-tolerant_2021}.
These differ in the way they compute individual error probabilities These differ in the way they compute individual error probabilities
from the physical error rate. from the physical error rate.
In this work we only consider \emph{standard circuit-based depolarizing In this work we consider the \emph{standard circuit-based depolarizing
noise}, as this is the standard approach in the literature. noise} variant of circuit-level noise, as this is the standard
We thus set the error probabilities of all error locations in the approach in the literature:
circuit-level noise model to the same value, the physical error rate We set the error probabilities of all error locations to the same
$p_\text{phys}$. value, the physical error rate $p_\text{phys}$.
%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%
\subsection{Per-Round Logical Error Rate} \subsection{Per-Round Logical Error Rate}
@@ -1042,7 +1053,7 @@ $p_\text{phys}$.
% Per-round LER % Per-round LER
Another aspect that is important to consider is the meaning of the Another important aspect to consider is the meaning of the
\ac{ler} in the context of a \ac{qec} system with multiple \ac{ler} in the context of a \ac{qec} system with multiple
rounds of syndrome measurements. rounds of syndrome measurements.
In order to facilitate the comparability of results obtained from In order to facilitate the comparability of results obtained from
@@ -1053,7 +1064,7 @@ The simplest way of calculating the per-round \ac{ler} is by modeling
each round as an independent experiment. each round as an independent experiment.
For each experiment, an error might occur with a certain probability For each experiment, an error might occur with a certain probability
$p_\text{e,round}$. $p_\text{e,round}$.
The overall probability of error is then Then the overall probability of error is
\begin{align} \begin{align}
\hspace{-12mm} \hspace{-12mm}
p_\text{e,total} &= 1 - (1 - p_\text{e,round})^{R} \nonumber\\ p_\text{e,total} &= 1 - (1 - p_\text{e,round})^{R} \nonumber\\
@@ -1063,15 +1074,15 @@ The overall probability of error is then
.% .%
\hspace{12mm} \hspace{12mm}
\end{align} \end{align}
We approximate $p_\text{e,total}$ using a Monte Carlo simulation and To this end, we approximate $p_\text{e,total}$ using a Monte Carlo
compute the per-round-\ac{ler} using \Cref{eq:per_round_ler}. simulation and
This is a common approach taken in the literature compute the per-round-\ac{ler} according to \Cref{eq:per_round_ler}.
\cite{gong_toward_2024}\cite{wang_fully_2025}. This is the approach taken in \cite{gong_toward_2024}\cite{wang_fully_2025}.
Another common approach \cite{chen_exponential_2021}% Another approach \cite{chen_exponential_2021}%
\cite{bausch_learning_2024}\cite{beni_tesseract_2025} is to assume an \cite{bausch_learning_2024}\cite{beni_tesseract_2025} is to assume an
exponential decay for the decoder's \emph{logical fidelity} exponential decay for the \emph{logical fidelity} of the decoder
\cite[Eq.~2]{bausch_learning_2024} \cite[Eq.~(2)]{bausch_learning_2024}
\begin{align*} \begin{align*}
F_\text{total} = (F_\text{round})^{R} F_\text{total} = (F_\text{round})^{R}
.% .%
@@ -1079,7 +1090,7 @@ exponential decay for the decoder's \emph{logical fidelity}
The logical fidelity is a measure of the quality of a logical state The logical fidelity is a measure of the quality of a logical state
\cite[Appendix~E]{postler_demonstration_2024}. \cite[Appendix~E]{postler_demonstration_2024}.
As it is related to the error rate through $F = 1 - 2p$, we obtain As it is related to the error rate through $F = 1 - 2p$, we obtain
\cite[Eq.~4]{bausch_learning_2024} \cite[Eq.~(4)]{bausch_learning_2024}
\begin{align} \begin{align}
(1 - 2p_\text{e,total}) &= (1 - 2p_\text{e,round})^{R} \nonumber\\ (1 - 2p_\text{e,total}) &= (1 - 2p_\text{e,round})^{R} \nonumber\\
\implies \hspace{15mm} p_\text{e,round} &= \frac{1}{2} \implies \hspace{15mm} p_\text{e,round} &= \frac{1}{2}
@@ -1095,10 +1106,10 @@ topic to our own work.
\subsection{Stim} \subsection{Stim}
\label{subsec:Stim} \label{subsec:Stim}
It is not immediately apparent how the \ac{dem} will look from looking It is not immediately apparent how the \ac{dem} will look from
at a code's \ac{pcm}, because it heavily depends on the exact circuit considering the \ac{pcm} of a code, because it heavily depends on the
construction and choice of noise model. exact circuit construction and choice of noise model.
As we noted in \Cref{subsec:Measurement Syndrome Matrix}, we can As we noted in \Cref{subsec:Measurement Syndrome Matrix}, we
obtain a measurement syndrome matrix by propagating Pauli frames obtain a measurement syndrome matrix by propagating Pauli frames
through the circuit. through the circuit.
The standard choice of simulation tool used for this purpose is The standard choice of simulation tool used for this purpose is
@@ -1109,16 +1120,16 @@ pypi package.
In fact, it was in this tool that the concept of the \ac{dem} was In fact, it was in this tool that the concept of the \ac{dem} was
first introduced. first introduced.
One capability of stim, and \acp{dem} in general, that we didn't go One capability of stim, and \acp{dem} in general, that we did not
into detail about in this chapter is the merging of error mechanisms. explain in detail in this chapter, is the merging of error mechanisms.
Since \acp{dem} differentiate errors based on their effect on the Since \acp{dem} differentiate errors based on their effect on the
measurements and not on their Pauli type and location measurements and not on their Pauli type and location
\cite[Sec.~1.4.3]{higgott_practical_2024}, it is natural to group \cite[Sec.~1.4.3]{higgott_practical_2024}, it is natural to group
errors that have the same effect. errors that have the same effect, i.e., syndrome.
This slightly lowers the computational complexity of decoding, as the This slightly lowers the computational complexity of decoding, as the
number of resulting \acp{vn} is reduced. number of resulting \acp{vn} is reduced.
While stim is a useful tool for circuit simulation, it doesn't While stim is a useful tool for circuit simulation, it does not
include many utilities for building syndrome extraction circuitry automatically. include many utilities for building syndrome extraction circuitry automatically.
The user has to define most, if not all, of the circuit manually, The user has to define most, if not all, of the circuit manually,
depending on the code in question. depending on the code in question.

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,115 @@
\chapter{Conclusion and Outlook} \chapter{Conclusion and Outlook}
\label{ch:Conclusion}
\content{Takeaway: Warm-start more effective for lower numbers of max % Recap of motivation
iterations (plays into our hands because lower number of iterations
means lower latency)} This thesis investigates decoding under \acp{dem} for fault-tolerant
\ac{qec}, with a focus on low-latency decoding methods for \ac{qldpc} codes.
The repetition of the syndrome measurements, especially under
consideration of circuit-level noise, leads to a significant increase
in decoding complexity: In our experiments on the $\llbracket
144,12,12 \rrbracket$ \ac{bb} code with $12$ syndrome extraction
rounds, the check matrix grows from 144 \acp{vn} and 72
\acp{cn} to 9504 \acp{vn} and 1008 \acp{cn}.
% Recap of research gap and own work
Sliding-window decoding addresses the latency constraint by
exploiting the time-like locality of the syndrome extraction circuit.
This manifests as a block-diagonal structure in the detector error
matrix when detectors are defined as the difference of consecutive
syndrome measurement rounds.
We draw a comparison to windowed decoding for \ac{sc}-\ac{ldpc}
codes, but note that the existing realizations of sliding-window
decoding discard the soft information produced inside one window
before moving to the next.
Building on this observation, we proposed warm-start sliding-window
decoding, in which the \ac{bp} messages on the edges crossing into
the overlap region of the previous window are reused to initialise
the corresponding messages of the next window in place of the
standard cold-start initialisation.
We formulate the warm start for standard \ac{bp} and for
\ac{bpgd}.
The latter is particularly attractive as an inner decoder because it
addresses the convergence problems caused by short cycles and
degeneracy in \ac{qldpc} Tanner graphs.
The decoders are evaluated by conducting Monte Carlo simulations on the
$\llbracket 144,12,12 \rrbracket$ \ac{bb} code over $12$ syndrome
extraction rounds under standard circuit-based depolarizing noise.
We focus on a qualitative analysis, refraining from further
optimizations such as introducing a normalization parameter for the
min-sum algorithm.
% Recap of experimental conclusions
For standard min-sum \ac{bp}, the warm start is consistently
beneficial compared to the cold start across the considered parameter ranges.
The size of the gain depends on the overlap between consecutive
windows: Enlarging $W$ or shrinking $F$, both of which enlarge the
overlap, results in larger gains of the warm start.
We observe that the underlying mechanism is an effective increase in
the number of \ac{bp} iterations spent on the \acp{vn} in the overlap
region: Each such \ac{vn} is processed by multiple consecutive window
invocations, and the warm start lets these invocations accumulate
iterations on the same \acp{vn} rather than restarting from scratch.
The gain is most pronounced at low numbers of maximum iterations, where
every additional iteration carries proportionally more information.
For \ac{bpgd}, we note that more information is available in the
overlap region of a window: In addition to the \ac{bp} messages,
there is information about which \acp{vn} were decimated and to what value.
Passing this decimation information to the next window in addition to
the messages turned out to worsen the performance considerably, which
we attributed to a premature hard decision of the \acp{vn} in the
overlap region.
Restricting the warm start to the \ac{bp} messages alone removed this effect.
The resulting message-only warm start recovered a consistent
improvement over the cold start that followed the same qualitative
behavior as for standard \ac{bp}: Larger overlap, achieved by larger
$W$ or smaller $F$, yielded a larger gain, and the
performance difference is most pronounced at low numbers of maximum iterations.
% Implications from experimental results
These observations imply that the warm-start modification to
sliding-window decoding can provide a consistent improvement, as long as
some care is taken with specifying the information to be passed to
the subsequent window.
Note that this comes at no additional cost to the decoding complexity,
since the only difference between warm- and cold-start sliding-window
decoding is the initialization of the \ac{bp} messages.
We expect similar behavior with other inner decoders that support
soft information initialization in the overlap region.
% Further research
Several directions for further research emerge from this work.
The most immediate is an extension of the evaluation to other
\ac{qldpc} code families, to other circuit-level noise models such as
SI1000 or EM3, and to a range of code sizes.
This would clarify the generality of the gain due to the warm-start
initialization.
We expect the qualitative findings to carry over, since the
underlying mechanism is structural rather than code-specific, but
quantifying the gain across code families and noise models is left to
future work.
A second direction is a systematic study of other inner decoders under the
warm-start framework, such as automorphism ensemble decoding
\cite{koutsioumpas_automorphism_2025} or neural \ac{bp}
\cite{miao_quaternary_2025}.
A final direction is suggested by the structural similarity between
sliding-window decoding for \acp{dem} and windowed decoding for
\ac{sc}-\ac{ldpc} codes.
The current approach to generating the syndrome extraction circuitry
necessarily leads to a coupling width of one between adjacent
syndrome measurement rounds.
A natural question is whether the coupling width could be
increased, e.g., by interleaving two separate realizations of the
syndrome measurement circuitry instead of always repeating the same one.
Work in this direction would also be a step toward bringing
sliding-window decoding under \acp{dem} within the scope of the analytical
machinery developed for \ac{sc}-\ac{ldpc} codes.
\content{\textbf{Ideas for further research}}
\content{Softer way of decimating VNs}
\content{Systematic study on using different inner decoders (AED,
SED, BPGD, ...)}
\content{Investigate SC-LDPC window decoding wave-like effects}

View File

@@ -0,0 +1,58 @@
\chapter*{Abstract}
% Current state of the art
\Ac{qec} protects fragile quantum states against decoherence by
encoding logical information into a larger number of physical qubits.
To obtain parity information on an encoded state without disturbing it, a
syndrome extraction is performed.
Because the syndrome extraction circuitry is itself implemented on
noisy quantum hardware, practical \ac{qec} must be fault-tolerant,
accounting for errors introduced by the correction procedure itself.
Fault tolerance considerations and the syndrome extraction circuit
are captured by \acp{dem}, which provide a unified framework for passing
this information to the decoder.
Accounting for fault tolerance substantially inflates the
decoding problem.
At the same time, \ac{qec} imposes strict latency constraints due to
the backlog problem, where syndrome data accumulates faster than it
can be decoded.
Together, these factors pose a serious challenge for practical decoders.
Sliding-window decoding addresses this challenge by exploiting the
repeated structure of the syndrome extraction circuitry, partitioning
the check matrix of the \ac{dem} into overlapping windows that can be
decoded sequentially.
Therefore, decoding can begin as soon as the syndrome components
associated with the first window have been measured.
% Our work: Identify research gap
In this thesis, we perform a review of the existing literature on
sliding-window decoding and draw an analogy to windowed
decoding of classical spatially-coupled low-density parity-check
(\acs{sc}-\acs{ldpc}) codes.
We recognize that in contrast to the latter, existing realizations
of sliding-window decoding for \ac{qec} discard the soft information
produced inside one window before moving to the subsequent window.
% Our work: Warm-start
% TODO: Quantify improvement. Also for conclusion
To take this information into account, we propose warm-start
sliding-window decoding, in which the \ac{bp} messages on the edges
crossing into the overlap region of the previous window are reused to
initialize the corresponding messages of the next window.
The warm start is formulated first for standard \ac{bp} and then extended to
\ac{bp} with guided decimation (\acs{bpgd}).
For both standard \ac{bp} and \ac{bpgd} decoding, the warm-start
initialization provides a consistent improvement across all examined
parameter settings.
We attribute this to an effective increase in \ac{bp} iterations on
variable nodes in the overlap regions: Each such variable node is
processed by multiple consecutive windows, and warm-starting lets
these invocations accumulate iterations rather than restart from scratch.
Crucially, the warm-start modification incurs no additional
computational cost relative to cold-start decoding, as it differs
only in the initialization of the \ac{bp} messages.

View File

@@ -3,3 +3,109 @@ sed -i "s/Héctor/H{\\\\'e}ctor/" bibliography.bib
sed -i "s/Bombín/Bomb{\\\\'i}n/" bibliography.bib sed -i "s/Bombín/Bomb{\\\\'i}n/" bibliography.bib
sed -i "s/Zémor/Z{\\\\'e}mor/" bibliography.bib sed -i "s/Zémor/Z{\\\\'e}mor/" bibliography.bib
sed -Ezi "s/\s(abstract|note|urldate|url|keywords|file) = \{[^}]*(\{[^}]*\}[^}]*)*\},?\n//g" bibliography.bib sed -Ezi "s/\s(abstract|note|urldate|url|keywords|file) = \{[^}]*(\{[^}]*\}[^}]*)*\},?\n//g" bibliography.bib
# Normalize arXiv-only entries to @misc with howpublished = {arXiv:<id>}.
# Detection: doi matches 10.48550/arXiv.<id>. The IEEEtranSA .bst's @article
# handler needs a journal field (which preprints lack) and ignores publisher,
# so for arXiv preprints we coerce the type to @misc and add howpublished
# (the field the .bst actually prints for @misc).
python3 - <<'PY'
import re
path = "bibliography.bib"
with open(path) as f:
text = f.read()
doi_re = re.compile(r"doi\s*=\s*\{10\.48550/arXiv\.([^}]+)\}")
type_re = re.compile(r"^@([A-Za-z]+)\{", re.MULTILINE)
howpublished_re = re.compile(r"^\s*howpublished\s*=\s*\{", re.MULTILINE)
title_field_re = re.compile(r"\b(title|booktitle)\s*=\s*\{", re.IGNORECASE)
inner_brace_re = re.compile(r"\{([A-Za-z0-9]+)\}")
# Split into entries by scanning for top-level "@type{...}" blocks. We walk
# brace depth so that the closing "}" of the entry is matched correctly even
# if internal fields contain braces.
def split_entries(s):
out, i, n = [], 0, len(s)
while i < n:
m = type_re.search(s, i)
if not m:
out.append(("text", s[i:]))
break
if m.start() > i:
out.append(("text", s[i:m.start()]))
depth, j = 0, m.start()
while j < n:
c = s[j]
if c == "{":
depth += 1
elif c == "}":
depth -= 1
if depth == 0:
j += 1
break
j += 1
out.append(("entry", s[m.start():j]))
i = j
return out
def normalize_arxiv(entry):
doi_m = doi_re.search(entry)
if not doi_m:
return entry
arxiv_id = doi_m.group(1)
entry = type_re.sub("@misc{", entry, count=1)
if not howpublished_re.search(entry):
# insert howpublished as the last field, before the entry-closing "}"
entry = re.sub(
r"(,?)(\s*)\}\s*$",
lambda m: ("," if m.group(1) != "," else m.group(1))
+ m.group(2) + "\thowpublished = {arXiv:" + arxiv_id + "},\n}",
entry,
count=1,
)
return entry
# Strip protective braces around words inside title/booktitle values.
# BibTeX uses "{Word}" inside titles to preserve case against the bibliography
# style's title-casing rules. We keep that protection only when every character
# inside the braces is non-lowercase (e.g. acronyms like {NASA}); for ordinary
# words like {Quantum} we drop the braces so the style's casing applies.
def strip_title_braces(entry):
out, i, n = [], 0, len(entry)
while True:
m = title_field_re.search(entry, i)
if not m:
out.append(entry[i:])
break
out.append(entry[i:m.end()])
depth, j = 1, m.end()
while j < n and depth > 0:
c = entry[j]
if c == "{":
depth += 1
elif c == "}":
depth -= 1
if depth == 0:
break
j += 1
value = entry[m.end():j]
cleaned = inner_brace_re.sub(
lambda mm: mm.group(1) if any(c.islower() for c in mm.group(1)) else mm.group(0),
value,
)
out.append(cleaned)
if j < n:
out.append(entry[j])
i = j + 1
return "".join(out)
def transform(entry):
return strip_title_braces(normalize_arxiv(entry))
parts = split_entries(text)
new_text = "".join(transform(p) if kind == "entry" else p for kind, p in parts)
with open(path, "w") as f:
f.write(new_text)
PY

View File

@@ -29,6 +29,7 @@
\usepackage{colortbl} \usepackage{colortbl}
\usepackage{cleveref} \usepackage{cleveref}
\usepackage{lipsum} \usepackage{lipsum}
\usepackage{booktabs}
\usetikzlibrary{calc, positioning, arrows, fit} \usetikzlibrary{calc, positioning, arrows, fit}
\usetikzlibrary{external} \usetikzlibrary{external}
@@ -42,7 +43,7 @@
\Crefname{equation}{}{} \Crefname{equation}{}{}
\Crefname{section}{Section}{Sections} \Crefname{section}{Section}{Sections}
\Crefname{subsection}{Subsection}{Subsections} \Crefname{subsection}{Section}{Sections}
\Crefname{figure}{Figure}{Figures} \Crefname{figure}{Figure}{Figures}
% %
@@ -89,10 +90,12 @@
% \thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost} % \thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost}
%\thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost\\Prof. Dr.-Ing. %\thesisHeadOfInstitute{Prof. Dr.-Ing. Peter Rost\\Prof. Dr.-Ing.
% Laurent Schmalen} % Laurent Schmalen}
\thesisSupervisor{Jonathan Mandelbaum} \thesisSupervisor{Dr.-Ing. Hedongliang Liu\\ && M.Sc. Jonathan Mandelbaum}
\thesisStartDate{01.11.2025} \thesisStartDate{Nov. 1st, 2025}
\thesisEndDate{04.05.2026} \thesisEndDate{May 4th, 2026}
\thesisSignatureDate{Signature date} \thesisSignatureDate{May 4th, 2026}
\thesisSignature{res/Unterschrift_AT_blue.png}
\thesisSignatureHeight{2.4cm}
\thesisLanguage{english} \thesisLanguage{english}
\begin{document} \begin{document}
@@ -101,13 +104,16 @@
\maketitle \maketitle
\newpage \newpage
% \include{chapters/abstract} \include{chapters/abstract}
\cleardoublepage \cleardoublepage
\pagenumbering{arabic} \pagenumbering{arabic}
\tableofcontents \newgeometry{a4paper,left=3cm,right=3cm,top=2cm,bottom=2.5cm}
\addtocontents{toc}{\protect\vspace*{-9mm}}
\tableofcontents
\cleardoublepage \cleardoublepage
\restoregeometry
\input{chapters/1_introduction.tex} \input{chapters/1_introduction.tex}
\input{chapters/2_fundamentals.tex} \input{chapters/2_fundamentals.tex}
@@ -120,6 +126,11 @@
% \listoftables % \listoftables
% \include{abbreviations} % \include{abbreviations}
\cleardoublepage
\phantomsection
\addcontentsline{toc}{chapter}{List of Abbreviations}
\printacronyms
\bibliography{lib/cel-thesis/IEEEabrv,src/thesis/bibliography} \bibliography{lib/cel-thesis/IEEEabrv,src/thesis/bibliography}
\end{document} \end{document}

Binary file not shown.

After

Width:  |  Height:  |  Size: 181 KiB