Now calculating the error rate based on the codewords, not the datawords
@@ -11,7 +11,7 @@ class SoftDecisionDecoder:
     """
 
     # TODO: Is 'R' actually called 'decoding matrix'?
-    def __init__(self, G: np.array, H: np.array, R: np.array):
+    def __init__(self, G: np.array, H: np.array):
         """Construct a new SoftDecisionDecoder object.
 
         :param G: Generator matrix
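For context on the constructor arguments: G is the code's generator matrix and H its parity-check matrix. A minimal sketch of such a pair, using the systematic Hamming(7,4) code as an assumed example (this specific code is not taken from the repository):

import numpy as np

# Assumed example: systematic Hamming(7,4) code with G = [I_4 | P] and H = [P^T | I_3].
P = np.array([[1, 1, 0],
              [1, 0, 1],
              [0, 1, 1],
              [1, 1, 1]])
G = np.hstack([np.eye(4, dtype=int), P])    # 4x7 generator matrix
H = np.hstack([P.T, np.eye(3, dtype=int)])  # 3x7 parity-check matrix

assert not np.any(np.dot(G, H.T) % 2)       # every row of G satisfies the parity checks

# decoder = SoftDecisionDecoder(G, H)       # new two-argument signature after this commit

Since decode() now returns codewords rather than datawords (see below), the decoding matrix R that mapped codewords back to datawords is no longer needed, which is why it disappears from the signature.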
@@ -20,9 +20,8 @@ class SoftDecisionDecoder:
         """
         self._G = G
         self._H = H
-        self._R = R
         self._datawords, self._codewords = self._gen_codewords()
-        self._codewords_bpsk = self._codewords * 2 - 1  # The codewords, but mapped to [-1, 1]^n
+        self._codewords_bpsk = 1 - 2 * self._codewords  # The codewords, but mapped to [-1, 1]^n
 
     def _gen_codewords(self) -> np.array:
         """Generate a list of all possible codewords.
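The changed initializer line also flips the BPSK convention: the old expression mapped bit 0 to -1 and bit 1 to +1, while the new one uses x = 1 - 2c (bit 0 -> +1, bit 1 -> -1), matching the reverse mapping changed in ProximalDecoder.decode further down. A quick illustration:

import numpy as np

c = np.array([0, 1, 1, 0])

old = c * 2 - 1   # -> [-1,  1,  1, -1]   (0 -> -1, 1 -> +1)
new = 1 - 2 * c   # -> [ 1, -1, -1,  1]   (0 -> +1, 1 -> -1)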
@@ -43,7 +42,7 @@ class SoftDecisionDecoder:
     def decode(self, y: np.array) -> np.array:
         """Decode a received signal.
 
-        This function assumes a BPSK-like modulated signal ([-1, 1]^n instead of [0, 1]^n).
+        This function assumes a BPSK modulated signal.
 
         :param y: Vector of received values. (y = x + w, where 'x' is element of [-1, 1]^n
                   and 'w' is noise)
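The channel model in this docstring (y = x + w with BPSK and additive noise) can be simulated in a few lines. A minimal sketch using the 1 - 2c mapping from this commit; the Gaussian noise level and the decoder variable are illustrative assumptions:

import numpy as np

rng = np.random.default_rng(0)

codeword = np.array([0, 1, 1, 0, 1, 0, 0])  # some valid codeword of the code in use
x = 1 - 2 * codeword                        # BPSK mapping: 0 -> +1, 1 -> -1
w = rng.normal(scale=0.5, size=x.shape)     # noise sample with an assumed sigma of 0.5
y = x + w                                   # received vector, exactly the y described above

# decoded = decoder.decode(y)               # 'decoder' is an assumed SoftDecisionDecoder instance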
@@ -51,4 +50,4 @@ class SoftDecisionDecoder:
         """
         correlations = np.dot(self._codewords_bpsk, y)
 
-        return np.dot(self._R, self._codewords[numpy.argmax(correlations)])
+        return self._codewords[numpy.argmax(correlations)]
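This return statement is the decision rule itself: correlate the received vector with every BPSK-mapped codeword and keep the best match, which under additive white Gaussian noise is the maximum-likelihood (minimum Euclidean distance) choice. A self-contained sketch of the same idea outside the class; the toy repetition code and variable names are illustrative:

import numpy as np

codewords = np.array([[0, 0, 0],
                      [1, 1, 1]])           # toy length-3 repetition code
codewords_bpsk = 1 - 2 * codewords          # 0 -> +1, 1 -> -1

y = np.array([0.9, -0.2, 0.4])              # noisy received vector
correlations = np.dot(codewords_bpsk, y)    # one correlation per candidate codeword

best = codewords[np.argmax(correlations)]   # highest correlation wins
print(best)                                 # -> [0 0 0]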
@@ -8,7 +8,7 @@ class ProximalDecoder:
     """
 
    # TODO: Is 'R' actually called 'decoding matrix'?
-    def __init__(self, H: np.array, R: np.array, K: int = 100, step_size: float = 0.1,
+    def __init__(self, H: np.array, K: int = 100, step_size: float = 0.1,
                  gamma: float = 0.05, eta: float = 1.5):
         """Construct a new ProximalDecoder Object.
 
@@ -20,7 +20,6 @@ class ProximalDecoder:
         :param eta: Positive constant slightly larger than one. See 3.2, p. 3
         """
         self._H = H
-        self._R = R
         self._K = K
         self._step_size = step_size
         self._gamma = gamma
@@ -51,7 +50,6 @@ class ProximalDecoder:
 
         return result
 
-    # TODO: Is the 'projection onto [-eta, eta]' actually just clipping?
     def _projection(self, x):
         """Project a vector onto [-eta, eta]^n in order to avoid numerical instability.
         Detailed in 3.2, p. 3 (Equation (15)).
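On the question raised by the TODO: the Euclidean projection of a vector onto the box [-eta, eta]^n is indeed elementwise clipping, so if Equation (15) is that box projection, _projection can be expressed with np.clip. A minimal standalone sketch (the function below is an illustration, not the repository's implementation; eta defaults to 1.5 as in the constructor):

import numpy as np

def project_onto_box(x, eta=1.5):
    # Euclidean projection of x onto [-eta, eta]^n, i.e. elementwise clipping.
    return np.clip(x, -eta, eta)

print(project_onto_box(np.array([-3.0, 0.2, 2.1])))  # -> [-1.5  0.2  1.5]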
@@ -73,12 +71,11 @@ class ProximalDecoder:
     def decode(self, y: np.array) -> np.array:
         """Decode a received signal. The algorithm is detailed in 3.2, p.3.
 
-        This function assumes a BPSK-like modulated signal ([-1, 1]^n instead of [0, 1]^n)
-        and an AWGN channel.
+        This function assumes a BPSK modulated signal and an AWGN channel.
 
         :param y: Vector of received values. (y = x + w, where 'x' is element of [-1, 1]^n
                   and 'w' is noise)
-        :return: Most probably sent dataword (element of [0, 1]^k)
+        :return: Most probably sent codeword (element of [0, 1]^k)
         """
         s = np.zeros(self._n)
         x_hat = np.zeros(self._n)
@@ -89,9 +86,9 @@ class ProximalDecoder:
             s = self._projection(s)  # Equation (15)
 
             x_hat = np.sign(s)
-            x_hat = (x_hat == 1) * 1  # Map the codeword from [-1, 1]^n to [0, 1]^n
+            x_hat = (x_hat == -1) * 1  # Map the codeword from [-1, 1]^n to [0, 1]^n
 
             if self._check_parity(x_hat):
                 break
 
-        return np.dot(self._R, x_hat)
+        return x_hat
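With both decoders now returning the codeword x_hat instead of np.dot(self._R, x_hat), the error rate mentioned in the commit title is naturally computed by comparing decoded codewords with the transmitted ones. A minimal sketch of that bookkeeping; the example arrays are made up and not taken from the repository:

import numpy as np

# Hypothetical transmitted and decoded codewords (two blocks of length 7):
sent    = np.array([[0, 1, 1, 0, 1, 0, 0],
                    [1, 0, 0, 1, 1, 1, 0]])
decoded = np.array([[0, 1, 1, 0, 1, 0, 0],
                    [1, 0, 1, 1, 1, 1, 0]])

bit_errors = np.count_nonzero(sent != decoded)        # codeword bits that differ
ber = bit_errors / sent.size                          # codeword-level bit error rate
bler = np.count_nonzero((sent != decoded).any(axis=1)) / len(sent)  # block error rate

print(ber, bler)  # -> 0.07142857142857142 0.5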