Moved python files from sw to sw/python; Moved scripts into sw/python/scripts
This commit is contained in:
2
sw/python/decoders/__init__.py
Normal file
2
sw/python/decoders/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
"""This package contains a number of different decoder implementations for
|
||||
LDPC codes."""
|
||||
52
sw/python/decoders/maximum_likelihood.py
Normal file
52
sw/python/decoders/maximum_likelihood.py
Normal file
@@ -0,0 +1,52 @@
|
||||
import numpy as np
|
||||
import itertools
|
||||
|
||||
|
||||
class MLDecoder:
    """This class naively implements a soft decision decoder. The decoder
    calculates the correlation between the received signal and each codeword
    and then chooses the one with the largest correlation.
    """

    def __init__(self, G: np.ndarray, H: np.ndarray):
        """Construct a new MLDecoder object.

        :param G: Generator matrix (k x n)
        :param H: Parity check matrix
        """
        self._G = G
        self._H = H
        self._datawords, self._codewords = self._gen_codewords()

        # The codewords, but mapped to [-1, 1]^n (BPSK: bit 0 -> +1, bit 1 -> -1)
        self._codewords_bpsk = 1 - 2 * self._codewords

    def _gen_codewords(self) -> tuple:
        """Generate all possible datawords together with their codewords.

        :return: Tuple (datawords, codewords) of numpy arrays of the form
            [[word_1], [word_2], ...]. (Each generated codeword is an
            element of [0, 1]^n)
        """
        k, n = self._G.shape

        # Generate a list of all possible data words
        u_lst = [list(i) for i in itertools.product([0, 1], repeat=k)]
        u_lst = np.array(u_lst)

        # Map each data word onto a codeword
        c_lst = np.dot(u_lst, self._G) % 2

        return u_lst, c_lst

    def decode(self, y: np.ndarray) -> np.ndarray:
        """Decode a received signal.

        This function assumes a BPSK modulated signal.

        :param y: Vector of received values. (y = x + w, where 'x' is
            element of [-1, 1]^n and 'w' is noise)
        :return: Most probably sent codeword (element of [0, 1]^n)
        """
        # Correlation of y with every BPSK-mapped codeword; the ML choice
        # (for equal-energy codewords) is the one with maximum correlation.
        correlations = np.dot(self._codewords_bpsk, y)

        return self._codewords[np.argmax(correlations)]
|
||||
97
sw/python/decoders/proximal.py
Normal file
97
sw/python/decoders/proximal.py
Normal file
@@ -0,0 +1,97 @@
|
||||
import numpy as np
|
||||
|
||||
|
||||
class ProximalDecoder:
    """Proximal Decoding for LDPC codes.

    Implements the algorithm from "Proximal Decoding for LDPC Codes" by
    Tadashi Wadayama and Satoshi Takabe.
    """

    def __init__(self, H: np.array, K: int = 1000, omega: float = 0.0002,
                 gamma: float = 0.05, eta: float = 1.5):
        """Construct a new ProximalDecoder Object.

        :param H: Parity Check Matrix
        :param K: Max number of iterations to perform when decoding
        :param omega: Step size for the gradient descent process
        :param gamma: Positive constant. Arises in the approximation of the
            prior PDF
        :param eta: Positive constant slightly larger than one. See 3.2, p. 3
        """
        self._H = H
        self._K = K
        self._step_size = omega
        self._gamma = gamma
        self._eta = eta

        self._k, self._n = self._H.shape
        # Boolean mask of the non-zero entries of H; selects, for every
        # parity check, which variables participate in it.
        self._H_ne_0 = H != 0

    @staticmethod
    def _L_awgn(s: np.array, y: np.array) -> np.array:
        """Variation of the negative log-likelihood, specialised to AWGN
        noise. See 4.1, p. 4.
        """
        return s - y

    def _grad_h(self, x: np.array) -> np.array:
        """Gradient of the code-constraint polynomial. See 2.3, p. 2."""
        # Per-check products of the participating entries of x: positions
        # where H is zero are replaced by 1, which leaves each row product
        # unchanged.
        selected = np.where(self._H_ne_0, x, 1)
        check_prods = np.prod(selected, axis=1)

        # Fold the per-check contributions back onto the variable nodes.
        contrib = np.dot(check_prods ** 2 - check_prods, self._H)

        return 4 * (x ** 2 - 1) * x + (2 / x) * contrib

    def _projection(self, v):
        """Clip a vector to [-eta, eta]^n in order to avoid numerical
        instability. Detailed in 3.2, p. 3 (Equation (15)).

        :param v: Vector to project
        :return: x clipped to [-eta, eta]^n
        """
        return np.clip(v, -self._eta, self._eta)

    def _check_parity(self, x_hat: np.array) -> bool:
        """Perform a parity check for a given codeword.

        :param x_hat: codeword to be checked (element of [0, 1]^n)
        :return: True if the parity check passes, i.e. the codeword is
            valid. False otherwise
        """
        # Valid codeword iff the syndrome H * x_hat is zero (mod 2).
        return not (np.dot(self._H, x_hat) % 2).any()

    def decode(self, y: np.array) -> np.array:
        """Decode a received signal. The algorithm is detailed in 3.2, p.3.

        This function assumes a BPSK modulated signal and an AWGN channel.

        :param y: Vector of received values. (y = x + w, where 'x' is
            element of [-1, 1]^n and 'w' is noise)
        :return: Most probably sent codeword (element of [0, 1]^n). If
            decoding fails, the returned value is 'None'
        """
        s = np.zeros(self._n)

        for _ in range(self._K):
            # Gradient step on the likelihood term
            r = s - self._step_size * self._L_awgn(s, y)

            # Step on the code-constraint term, then the projection of
            # Equation (15)
            s = self._projection(r - self._gamma * self._grad_h(r))

            # Hard decision: map the sign pattern (-1 -> bit 1, otherwise
            # bit 0) back onto [0, 1]^n
            bits = np.where(np.sign(s) == -1, 1, 0)

            # Stop as soon as a valid codeword is reached
            if self._check_parity(bits):
                return bits

        return None
|
||||
Reference in New Issue
Block a user