Compare commits

4 commits: d5cf6dacf7...5762c602af

| SHA1 |
|---|
| 5762c602af |
| 94b3619487 |
| 5c060088e0 |
| 9ab80a8385 |
```diff
@@ -1,6 +1,8 @@
 # Homotopy Continuation
 
-### Introduction
+## Introduction
 
+### Overview
+
 The aim of a homotopy method consists in solving a system of N nonlinear
 equations in N variables \[1, p.1\]:
```
```diff
@@ -85,6 +87,26 @@ between successive points produced by the iterations can be used as a criterion
 for convergence. Of course, if the iterations fail to converge, one must go
 back to adjust the step size for the Euler’s predictor." [2, p.130]
 
+## Application to Channel Decoding
+
+We can describe the decoding problem using the code constraint polynomial [3]
+
+$$
+h(\bm{x}) = \underbrace{\sum_{i=1}^{n}\left(1-x_i^2\right)^2}_{\text{Bipolar constraint}} + \underbrace{\sum_{j=1}^{m}\left(1 - \left(\prod_{i\in A(j)}x_i\right)\right)^2}_{\text{Parity constraint}},
+$$
+
+where $A(j) = \left\{i \in [1:n]: H_{j,i} = 1\right\}$ represents the set of
+variable nodes involved in parity check j. This polynomial consists of a set of
+terms representing the bipolar constraint and a set of terms representing the
+parity constraint. In a similar vein, we can define the following set of
+polynomial equations to describe codewords:
+
+$$
+F = \left[\begin{array}{c}1 - x_1^2 \\ \vdots\\ 1 - x_n^2 \\ 1 - \prod_{i \in A(1)}x_i \\ \vdots\\ 1 - \prod_{i \in A(m)}x_i\end{array}\right] \overset{!}{=} \bm{0}.
+$$
+
+This is a problem we can solve using homotopy continuation.
+
 ______________________________________________________________________
 
 ## References
```
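To make the quoted predictor-corrector recipe concrete, here is a minimal, self-contained sketch of homotopy continuation for a single equation. It does not use the repository's `PathTracker` or `HomotopyGenerator`; the convex homotopy `H(x, t) = (1 - t) * G(x) + t * F(x)`, the toy target system `F`, and all names below are assumptions made only for this illustration.

```python
import numpy as np


# Assumed toy target system F(x) = 0 with N = 1: x^3 - 2 = 0
def F(x):
    return np.array([x[0]**3 - 2.0])


def dF(x):
    return np.array([[3.0 * x[0]**2]])


def track(x_start, step_size=0.05, newton_iters=5):
    """Trace x(t) with H(x(t), t) = 0 from t = 0 (start system) to t = 1 (target system)."""
    x0 = np.asarray(x_start, dtype=float)

    def G(x):                   # start system with the known root x0
        return x - x0

    def H(x, t):                # convex homotopy between G and F
        return (1.0 - t) * G(x) + t * F(x)

    def dH_dx(x, t):
        return (1.0 - t) * np.eye(len(x)) + t * dF(x)

    def dH_dt(x):
        return F(x) - G(x)

    x, t = x0.copy(), 0.0
    n_steps = int(np.ceil(1.0 / step_size))
    for k in range(n_steps):
        t_next = min((k + 1) * step_size, 1.0)
        h = t_next - t
        # Euler predictor: follow the tangent dx/dt = -(dH/dx)^{-1} dH/dt
        dx_dt = np.linalg.solve(dH_dx(x, t), -dH_dt(x))
        x, t = x + h * dx_dt, t_next
        # Newton corrector: pull the prediction back onto the curve H(., t) = 0
        for _ in range(newton_iters):
            x = x - np.linalg.solve(dH_dx(x, t), H(x, t))
    return x


print(track(np.array([1.0])))  # approaches the real root 2**(1/3) ≈ 1.2599
```

In the repository, this role appears to be played by `PathTracker` (one call to `step` per predictor-corrector iteration), with the step size and Newton settings exposed as the `--euler-step-size`, `--newton-max-iter`, and `--newton-convergence-threshold` options of the new simulation script below.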
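The code constraint polynomial $h(\bm{x})$ and the system $F$ introduced in this hunk can be written down directly for a small parity check matrix. The matrix `H_toy` and all names in the sketch below are assumptions local to this example and are unrelated to the repository's `HomotopyGenerator`.

```python
import numpy as np

# Assumed toy parity check matrix: m = 2 checks, n = 4 variable nodes
H_toy = np.array([[1, 1, 0, 1],
                  [0, 1, 1, 1]])
m, n = H_toy.shape
A = [np.flatnonzero(H_toy[j]) for j in range(m)]  # A(j): variable nodes in check j


def h(x):
    """Code constraint polynomial: bipolar terms plus parity terms."""
    bipolar = np.sum((1.0 - x**2) ** 2)
    parity = sum((1.0 - np.prod(x[A[j]])) ** 2 for j in range(m))
    return bipolar + parity


def F(x):
    """Stacked polynomial system F(x) = 0 describing (bipolar images of) codewords."""
    bipolar = 1.0 - x**2
    parity = np.array([1.0 - np.prod(x[A[j]]) for j in range(m)])
    return np.concatenate([bipolar, parity])


# The all-ones vector is the bipolar image of the all-zero codeword,
# so both the polynomial and the system vanish there.
x = np.ones(n)
print(h(x))   # 0.0
print(F(x))   # all zeros
```

Any other valid codeword, mapped to its bipolar representation, also zeroes both expressions, while non-codewords leave at least one parity term nonzero.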
```diff
@@ -97,3 +119,7 @@ Philadelphia, PA 19104), 2003. doi: 10.1137/1.9780898719154.
 \[2\]: T. Chen and T.-Y. Li, “Homotopy continuation method for solving systems
 of nonlinear and polynomial equations,” Communications in Information and
 Systems, vol. 15, no. 2, pp. 119–307, 2015, doi: 10.4310/CIS.2015.v15.n2.a1.
+
+\[3\]: T. Wadayama and S. Takabe, “Proximal decoding for LDPC codes,” IEICE
+Transactions on Fundamentals of Electronics, Communications and Computer
+Sciences, vol. 106, no. 3, pp. 359–367, 2023.
```
python/examples/simulate_error_rate.py (new file, 150 lines)

```python
import typing
import numpy as np
import galois
import argparse
from dataclasses import dataclass
from tqdm import tqdm

# autopep8: off
import sys
import os
sys.path.append(f"{os.path.dirname(os.path.abspath(__file__))}/../")

from hccd import utility, homotopy_generator, path_tracker
# autopep8: on


@dataclass
class SimulationArgs:
    euler_step_size: float
    euler_max_tries: int
    newton_max_iter: int
    newton_convergence_threshold: float
    sigma: int
    homotopy_iter: int

    max_frames: int
    target_frame_errors: int


def decode(tracker, y, H, args: SimulationArgs) -> np.ndarray:
    x_hat = np.mod(np.round(y), 2).astype('int32')

    s = np.concatenate([y, np.array([0])])
    for i in range(args.homotopy_iter):
        # Hard-decide the current point and stop once all parity checks are satisfied
        x_hat = np.mod(np.round(s[:-1]), 2).astype('int32')
        if not np.any(np.mod(H @ x_hat, 2)):
            return x_hat

        # if s[-1] > 1.5:
        #     return x_hat

        try:
            s = tracker.step(s)
        except Exception:
            return x_hat

    return x_hat


def simulate_error_rates_for_SNR(H, Eb_N0, args: SimulationArgs) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
    GF = galois.GF(2)
    H_GF = GF(H)
    G = H_GF.null_space()
    k, n = G.shape

    homotopy = homotopy_generator.HomotopyGenerator(H)

    # print(f"G: {homotopy.G}")
    # print(f"F: {homotopy.F}")
    # print(f"H: {homotopy.H}")
    # print(f"DH: {homotopy.DH}")

    tracker = path_tracker.PathTracker(homotopy, args.euler_step_size, args.euler_max_tries,
                                       args.newton_max_iter, args.newton_convergence_threshold, args.sigma)

    num_frames = 0

    bit_errors = 0
    frame_errors = 0
    decoding_failures = 0

    for _ in tqdm(range(args.max_frames)):
        # Transmit the all-zero codeword over an AWGN channel
        Eb_N0_lin = 10**(Eb_N0 / 10)
        N0 = 1 / (2 * k / n * Eb_N0_lin)
        y = np.zeros(n) + np.sqrt(N0) * np.random.normal(size=n)

        x_hat = decode(tracker, y, H, args)

        bit_errors += np.sum(x_hat != np.zeros(n))
        frame_errors += np.any(x_hat != np.zeros(n))

        if np.any(np.mod(H @ x_hat, 2)):
            decoding_failures += 1

        num_frames += 1

        if frame_errors > args.target_frame_errors:
            break

    BER = bit_errors / (num_frames * n)
    FER = frame_errors / num_frames
    DFR = decoding_failures / num_frames

    return FER, BER, DFR


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser()

    parser.add_argument("-c", "--code", type=str, required=True,
                        help="Path to the alist file containing the parity check matrix of the code")
    # TODO: Extend this script to multiple SNRs
    parser.add_argument("--snr", type=float, required=True,
                        help="Eb/N0 to use for this simulation")
    parser.add_argument("--max-frames", type=int, default=int(1e6),
                        help="Maximum number of frames to simulate")
    parser.add_argument("--target-frame-errors", type=int, default=200,
                        help="Number of frame errors after which to stop the simulation")

    parser.add_argument("--euler-step-size", type=float,
                        default=0.05, help="Step size for Euler predictor")
    parser.add_argument("--euler-max-tries", type=int, default=5,
                        help="Maximum number of tries for Euler predictor")
    parser.add_argument("--newton-max-iter", type=int, default=5,
                        help="Maximum number of iterations for Newton corrector")
    parser.add_argument("--newton-convergence-threshold", type=float,
                        default=0.01, help="Convergence threshold for Newton corrector")
    parser.add_argument("-s", "--sigma", type=int, default=1,
                        help="Direction in which the path is traced")
    parser.add_argument("-n", "--homotopy-iter", type=int, default=20,
                        help="Number of iterations of the homotopy continuation method to perform for each decoding")

    args = parser.parse_args()

    # Load the parity check matrix and run the simulation
    H = utility.read_alist_file(args.code)

    simulation_args = SimulationArgs(
        euler_step_size=args.euler_step_size,
        euler_max_tries=args.euler_max_tries,
        newton_max_iter=args.newton_max_iter,
        newton_convergence_threshold=args.newton_convergence_threshold,
        sigma=args.sigma,
        homotopy_iter=args.homotopy_iter,
        max_frames=args.max_frames,
        target_frame_errors=args.target_frame_errors)

    FER, BER, DFR = simulate_error_rates_for_SNR(H, args.snr, simulation_args)

    print(f"FER: {FER}")
    print(f"DFR: {DFR}")
    print(f"BER: {BER}")


if __name__ == "__main__":
    main()
```
```diff
@@ -1,6 +0,0 @@
-def main():
-    pass
-
-
-if __name__ == "__main__":
-    main()
```
```diff
@@ -29,7 +29,7 @@ class PathTracker:
 
     def step(self, y):
         """Perform one predictor-corrector step."""
-        return self.transparent_step(y)[0]
+        return self.transparent_step(y)[3]
 
     def transparent_step(self, y) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray,]:
         """Perform one predictor-corrector step, returning intermediate results."""
```
python/hccd/utility.py (new file, 29 lines)

```python
import numpy as np


def _parse_alist_header(header):
    size = header.split()
    return int(size[0]), int(size[1])


def read_alist_file(filename):
    """
    This function reads in an alist file and creates the
    corresponding parity check matrix H. The format of alist
    files is described at:
    http://www.inference.phy.cam.ac.uk/mackay/codes/alist.html
    """

    with open(filename, 'r') as myfile:
        data = myfile.readlines()
    numCols, numRows = _parse_alist_header(data[0])

    H = np.zeros((numRows, numCols))

    # The locations of the 1s start in the 5th line of the file, one line per column of H
    for lineNumber in np.arange(4, 4 + numCols):
        indices = data[lineNumber].split()
        for index in indices:
            H[int(index) - 1, lineNumber - 4] = 1

    return H.astype(np.int32)
```