import numpy as np


# TODO: Unify the interface regarding [0, 1]^n and [-1, 1]^n
class ProximalDecoder:
    """Implementation of the proximal decoding algorithm for LDPC codes.

    See "Proximal Decoding for LDPC Codes" by Tadashi Wadayama and Satoshi Takabe.
    """

    # TODO: Is 'R' actually called 'decoding matrix'?
    def __init__(self, H: np.ndarray, R: np.ndarray, K: int = 100, step_size: float = 0.1,
                 gamma: float = 0.05, eta: float = 1.5):
        """Construct a new ProximalDecoder object.

        :param H: Parity check matrix
        :param R: Decoding matrix (maps a decoded codeword back to its dataword)
        :param K: Maximum number of iterations to perform when decoding
        :param step_size: Step size for the gradient descent process
        :param gamma: Positive constant; arises in the approximation of the prior PDF
        :param eta: Positive constant slightly larger than one. See 3.2, p. 3
        """
        self._H = H
        self._R = R
        self._K = K
        self._step_size = step_size
        self._gamma = gamma
        self._eta = eta

        # Number of parity checks and codeword length
        self._k, self._n = self._H.shape
        # Support mask of H: marks the variables participating in each check
        self._H_ne_0 = (H != 0)

    @staticmethod
    def _L_awgn(s: np.ndarray, y: np.ndarray) -> np.ndarray:
        """Gradient of the negative log-likelihood for the special case of AWGN
        noise, up to a positive scaling that is absorbed into the step size.

        See 4.1, p. 4.
        """
        return s - y

    def _grad_h(self, x: np.ndarray) -> np.ndarray:
        """Gradient of the code-constraint polynomial. See 2.3, p. 2.

        Note: x must not contain exact zeros, since the gradient involves a
        division by x.
        """
        # Pre-computation: for each check, the product of x over the variables
        # participating in that check
        A_prod_matrix = np.tile(x, (self._k, 1))
        A_prods = np.prod(A_prod_matrix, axis=1, where=self._H_ne_0)

        # Calculate the gradient: the first term pulls every component toward
        # +/-1, the second term pulls each check's product toward one
        sums = np.dot(A_prods**2 - A_prods, self._H)
        result = 4 * (x**2 - 1) * x + (2 / x) * sums

        return result

    # Note: The Euclidean projection onto a box such as [-eta, eta]^n is
    # exactly componentwise clipping.
    def _projection(self, x: np.ndarray) -> np.ndarray:
        """Project a vector onto [-eta, eta]^n in order to avoid numerical
        instability. Detailed in 3.2, p. 3 (Equation (15)).

        :param x: Vector to project
        :return: x clipped to [-eta, eta]^n
        """
        return np.clip(x, -self._eta, self._eta)

    def _check_parity(self, y_hat: np.ndarray) -> bool:
        """Perform a parity check for a given codeword.

        :param y_hat: Codeword to be checked (element of {0, 1}^n)
        :return: True if the parity check passes, i.e. the codeword is valid;
            False otherwise
        """
        syndrome = np.dot(self._H, y_hat) % 2
        return not np.any(syndrome)

    def decode(self, y: np.ndarray) -> np.ndarray:
        """Decode a received signal. The algorithm is detailed in 3.2, p. 3.

        This function assumes a BPSK-modulated signal (symbols in {-1, +1}^n
        instead of bits in {0, 1}^n) and an AWGN channel.

        :param y: Vector of received values (y = x + w, where x is an element
            of {-1, +1}^n and w is noise)
        :return: Estimate of the most likely transmitted dataword (element of
            {0, 1}^k)
        """
        s = np.zeros(self._n)
        x_hat = np.zeros(self._n)
        for _ in range(self._K):
            # Gradient step on the negative log-likelihood
            r = s - self._step_size * self._L_awgn(s, y)

            # Gradient step on the code-constraint polynomial, followed by the
            # projection onto [-eta, eta]^n (Equation (15))
            s = r - self._gamma * self._grad_h(r)
            s = self._projection(s)

            # Tentative hard decision: map from {-1, +1}^n to {0, 1}^n
            x_hat = np.sign(s)
            x_hat = (x_hat == 1) * 1

            # Stop as soon as the tentative estimate is a valid codeword
            if self._check_parity(x_hat):
                break

        return np.dot(self._R, x_hat)
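

# Example usage (not part of the original module): a minimal sketch of how the
# decoder might be driven end to end. The systematic (7, 4) Hamming code, the
# all-zero dataword, the BPSK mapping b -> 2b - 1, and the noise level are all
# illustrative assumptions; R simply extracts the four systematic bits of the
# decoded codeword.
if __name__ == "__main__":
    H = np.array([[1, 1, 1, 0, 1, 0, 0],
                  [1, 1, 0, 1, 0, 1, 0],
                  [1, 0, 1, 1, 0, 0, 1]])
    R = np.hstack((np.eye(4, dtype=int), np.zeros((4, 3), dtype=int)))

    # Transmit the all-zero dataword over an AWGN channel
    rng = np.random.default_rng(seed=0)
    x = 2 * np.zeros(7) - 1  # BPSK symbols for the all-zero codeword
    y = x + 0.1 * rng.standard_normal(7)

    decoder = ProximalDecoder(H, R)
    print(decoder.decode(y))  # Expected output: [0 0 0 0]

    # Optional sanity check: compare _grad_h against a central finite
    # difference of the code-constraint polynomial. The closed form of h below,
    #   h(x) = sum_i (x_i^2 - 1)^2 + sum_j (prod_{i in A_j} x_i - 1)^2,
    # is inferred from _grad_h and the paper reference (an assumption).
    def h(v):
        prods = np.prod(np.tile(v, (H.shape[0], 1)), axis=1, where=(H != 0))
        return np.sum((v**2 - 1) ** 2) + np.sum((prods - 1) ** 2)

    x0 = rng.uniform(0.2, 0.8, size=7)  # Away from zero: _grad_h divides by x
    eps = 1e-6
    fd = np.array([(h(x0 + eps * np.eye(7)[i]) - h(x0 - eps * np.eye(7)[i]))
                   / (2 * eps) for i in range(7)])
    assert np.allclose(decoder._grad_h(x0), fd, atol=1e-4)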